Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
###################################################################################
# this file only exposes DAL and Field
###################################################################################

__all__ = ['DAL', 'Field']

# default storage lengths per field type, used when a field definition
# does not specify an explicit length
DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}
# size limit for the timings buffer (consumer not visible in this chunk)
TIMINGSSIZE = 100
# per-platform shared-library filename used to load the SpatiaLite extension
SPATIALLIBS = {
    'Windows':'libspatialite',
    'Linux':'libspatialite.so',
    'Darwin':'libspatialite.dylib'
    }
# fallback connection string -- presumably used when DAL() gets no URI;
# verify against the DAL constructor
DEFAULT_URI = 'sqlite://dummy.db'
  150   
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
# Python 2/3 compatibility shims: normalize pickle/StringIO/copyreg names,
# md5-over-text helper, and the bytes/unicode/long aliases used below.
PYTHON_VERSION = sys.version_info[:3]
if PYTHON_VERSION[0] == 2:
    import cPickle as pickle
    import cStringIO as StringIO
    import copy_reg as copyreg
    # py2 md5 accepts str directly
    hashlib_md5 = hashlib.md5
    bytes, unicode = str, unicode
else:
    import pickle
    from io import StringIO as StringIO
    import copyreg
    # py3 has no separate long type
    long = int
    # py3 md5 requires bytes, so encode text first
    hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
    bytes, unicode = bytes, str

# OrderedDict was added to the stdlib in 2.7; older pythons use the
# bundled backport
if PYTHON_VERSION[:2] < (2, 7):
    from gluon.contrib.ordereddict import OrderedDict
else:
    from collections import OrderedDict
  195   
  196   
# object types treated as "callable" when resolving dynamic field
# defaults / computed values
CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)

# keyword arguments recognized in table definitions -- presumably the
# kwargs accepted by define_table(); confirm against that method
TABLE_ARGS = set(
    ('migrate','primarykey','fake_migrate','format','redefine',
     'singular','plural','trigger_name','sequence_name','fields',
     'common_filter','polymodel','table_class','on_define','rname'))

# keyword arguments recognized by select() -- confirm against Set.select
SELECT_ARGS = set(
    ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
     'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby'))

# raw attribute access, bypassing any custom __getattr__/__setattr__
# defined on Row/Table-like objects
ogetattr = object.__getattribute__
osetattr = object.__setattr__
# short local aliases for frequently used path helpers
exists = os.path.exists
pjoin = os.path.join
  214   
  215  ################################################################################### 
  216  # following checks allow the use of dal without web2py, as a standalone module 
  217  ################################################################################### 
  218  try: 
  219      from gluon.utils import web2py_uuid 
  220  except (ImportError, SystemError): 
  221      import uuid 
222 - def web2py_uuid(): return str(uuid.uuid4())
223 224 try: 225 import portalocker 226 have_portalocker = True 227 except ImportError: 228 have_portalocker = False 229 230 try: 231 from gluon import serializers 232 have_serializers = True 233 except ImportError: 234 have_serializers = False 235 try: 236 import json as simplejson 237 except ImportError: 238 try: 239 import gluon.contrib.simplejson as simplejson 240 except ImportError: 241 simplejson = None 242 243 LOGGER = logging.getLogger("web2py.dal") 244 DEFAULT = lambda:0 245 246 GLOBAL_LOCKER = threading.RLock() 247 THREAD_LOCAL = threading.local() 248 249 # internal representation of tables with field 250 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 251 252 REGEX_TYPE = re.compile('^([\w\_\:]+)') 253 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 254 REGEX_W = re.compile('^\w+$') 255 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.([^.]+)$') 256 REGEX_NO_GREEDY_ENTITY_NAME = r'(.+?)' 257 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$') 258 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 259 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 260 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 261 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 262 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 263 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 264 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 265 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 266 REGEX_QUOTES = re.compile("'[^']*'") 267 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 268 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 269 REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' 270 271 # list of 
drivers will be built on the fly 272 # and lists only what is available 273 DRIVERS = [] 274 275 try: 276 from new import classobj 277 from google.appengine.ext import db as gae 278 from google.appengine.ext import ndb 279 from google.appengine.api import namespace_manager, rdbms 280 from google.appengine.api.datastore_types import Key ### for belongs on ID 281 from google.appengine.ext.db.polymodel import PolyModel 282 from google.appengine.ext.ndb.polymodel import PolyModel as NDBPolyModel 283 DRIVERS.append('google') 284 except ImportError: 285 pass 286 287 if not 'google' in DRIVERS: 288 289 try: 290 from pysqlite2 import dbapi2 as sqlite2 291 DRIVERS.append('SQLite(sqlite2)') 292 except ImportError: 293 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 294 295 try: 296 from sqlite3 import dbapi2 as sqlite3 297 DRIVERS.append('SQLite(sqlite3)') 298 except ImportError: 299 LOGGER.debug('no SQLite drivers sqlite3') 300 301 try: 302 # first try contrib driver, then from site-packages (if installed) 303 try: 304 import gluon.contrib.pymysql as pymysql 305 # monkeypatch pymysql because they havent fixed the bug: 306 # https://github.com/petehunt/PyMySQL/issues/86 307 pymysql.ESCAPE_REGEX = re.compile("'") 308 pymysql.ESCAPE_MAP = {"'": "''"} 309 # end monkeypatch 310 except ImportError: 311 import pymysql 312 DRIVERS.append('MySQL(pymysql)') 313 except ImportError: 314 LOGGER.debug('no MySQL driver pymysql') 315 316 try: 317 import MySQLdb 318 DRIVERS.append('MySQL(MySQLdb)') 319 except ImportError: 320 LOGGER.debug('no MySQL driver MySQLDB') 321 322 try: 323 import mysql.connector as mysqlconnector 324 DRIVERS.append("MySQL(mysqlconnector)") 325 except ImportError: 326 LOGGER.debug("no driver mysql.connector") 327 328 try: 329 import psycopg2 330 from psycopg2.extensions import adapt as psycopg2_adapt 331 DRIVERS.append('PostgreSQL(psycopg2)') 332 except ImportError: 333 LOGGER.debug('no PostgreSQL driver psycopg2') 334 335 try: 336 # first try contrib driver, 
then from site-packages (if installed) 337 try: 338 import gluon.contrib.pg8000.dbapi as pg8000 339 except ImportError: 340 import pg8000.dbapi as pg8000 341 DRIVERS.append('PostgreSQL(pg8000)') 342 except ImportError: 343 LOGGER.debug('no PostgreSQL driver pg8000') 344 345 try: 346 import cx_Oracle 347 DRIVERS.append('Oracle(cx_Oracle)') 348 except ImportError: 349 LOGGER.debug('no Oracle driver cx_Oracle') 350 351 try: 352 try: 353 import pyodbc 354 except ImportError: 355 try: 356 import gluon.contrib.pypyodbc as pyodbc 357 except Exception, e: 358 raise ImportError(str(e)) 359 DRIVERS.append('MSSQL(pyodbc)') 360 DRIVERS.append('DB2(pyodbc)') 361 DRIVERS.append('Teradata(pyodbc)') 362 DRIVERS.append('Ingres(pyodbc)') 363 except ImportError: 364 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 365 366 try: 367 import Sybase 368 DRIVERS.append('Sybase(Sybase)') 369 except ImportError: 370 LOGGER.debug('no Sybase driver') 371 372 try: 373 import kinterbasdb 374 DRIVERS.append('Interbase(kinterbasdb)') 375 DRIVERS.append('Firebird(kinterbasdb)') 376 except ImportError: 377 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 378 379 try: 380 import fdb 381 DRIVERS.append('Firebird(fdb)') 382 except ImportError: 383 LOGGER.debug('no Firebird driver fdb') 384 ##### 385 try: 386 import firebirdsql 387 DRIVERS.append('Firebird(firebirdsql)') 388 except ImportError: 389 LOGGER.debug('no Firebird driver firebirdsql') 390 391 try: 392 import informixdb 393 DRIVERS.append('Informix(informixdb)') 394 LOGGER.warning('Informix support is experimental') 395 except ImportError: 396 LOGGER.debug('no Informix driver informixdb') 397 398 try: 399 import sapdb 400 DRIVERS.append('SQL(sapdb)') 401 LOGGER.warning('SAPDB support is experimental') 402 except ImportError: 403 LOGGER.debug('no SAP driver sapdb') 404 405 try: 406 import cubriddb 407 DRIVERS.append('Cubrid(cubriddb)') 408 LOGGER.warning('Cubrid support is experimental') 409 except ImportError: 410 
LOGGER.debug('no Cubrid driver cubriddb') 411 412 try: 413 from com.ziclix.python.sql import zxJDBC 414 import java.sql 415 # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 416 from org.sqlite import JDBC # required by java.sql; ensure we have it 417 zxJDBC_sqlite = java.sql.DriverManager 418 DRIVERS.append('PostgreSQL(zxJDBC)') 419 DRIVERS.append('SQLite(zxJDBC)') 420 LOGGER.warning('zxJDBC support is experimental') 421 is_jdbc = True 422 except ImportError: 423 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 424 is_jdbc = False 425 426 try: 427 import couchdb 428 DRIVERS.append('CouchDB(couchdb)') 429 except ImportError: 430 LOGGER.debug('no Couchdb driver couchdb') 431 432 try: 433 import pymongo 434 DRIVERS.append('MongoDB(pymongo)') 435 except: 436 LOGGER.debug('no MongoDB driver pymongo') 437 438 try: 439 import imaplib 440 DRIVERS.append('IMAP(imaplib)') 441 except: 442 LOGGER.debug('no IMAP driver imaplib') 443 444 PLURALIZE_RULES = [ 445 (re.compile('child$'), re.compile('child$'), 'children'), 446 (re.compile('oot$'), re.compile('oot$'), 'eet'), 447 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 448 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 449 (re.compile('sis$'), re.compile('sis$'), 'ses'), 450 (re.compile('man$'), re.compile('man$'), 'men'), 451 (re.compile('ife$'), re.compile('ife$'), 'ives'), 452 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 453 (re.compile('lf$'), re.compile('lf$'), 'lves'), 454 (re.compile('[sxz]$'), re.compile('$'), 'es'), 455 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 456 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 457 (re.compile('$'), re.compile('$'), 's'), 458 ]
def pluralize(singular, rules=None):
    """
    Return the plural form of `singular` using the first matching rule.

    Each rule is a ``(search_regex, sub_regex, replacement)`` triple; the
    first rule whose ``search_regex`` matches decides the substitution.
    Returns None if no rule matches (the default table ends with a
    catch-all that appends 's').

    :param singular: word to pluralize
    :param rules: optional rule list; defaults to the module-level
        PLURALIZE_RULES, resolved lazily at call time instead of being
        captured as a def-time default (fixes the mutable/module-level
        default-argument pattern and lets the table be monkey-patched)
    """
    if rules is None:
        rules = PLURALIZE_RULES
    for re_search, re_sub, replace in rules:
        # FIX: test the match explicitly instead of relying on the
        # truthiness of the substitution result, which conflated an
        # empty-string substitution with "no match"
        if re_search.search(singular):
            return re_sub.sub(replace, singular)
465
def hide_password(uri):
    """
    Mask the password portion of a DAL connection URI with '******'.

    Accepts a single URI string, or a list/tuple of URIs in which case
    each element is masked and a list is returned.
    """
    if isinstance(uri, (list, tuple)):
        return list(map(hide_password, uri))
    return REGEX_NOPASSWD.sub('******', uri)
470
def OR(a, b):
    """Functional form of ``a | b`` (used to fold query disjunctions)."""
    combined = a | b
    return combined
473
def AND(a, b):
    """Functional form of ``a & b`` (used to fold query conjunctions)."""
    combined = a & b
    return combined
476
def IDENTITY(x):
    """Identity function: return `x` unchanged (default credential decoder)."""
    return x
478
def varquote_aux(name, quotestr='%s'):
    """Wrap `name` in `quotestr` unless it is already a plain ``\\w+`` identifier."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
481
def quote_keyword(a, keyword='timestamp'):
    """
    Double-quote occurrences of ``.<keyword>`` (when followed by a word
    character) inside the SQL fragment `a`, e.g. to protect a reserved
    word used as a field name.

    BUG FIX: the original compiled the literal pattern
    ``'\\.keyword(?=\\w)'`` -- the `keyword` argument was never
    interpolated into the match pattern, so the function could never
    match the keyword it was asked to quote. The lookahead semantics of
    the original pattern are preserved.
    """
    regex = re.compile(r'\.%s(?=\w)' % re.escape(keyword))
    a = regex.sub('."%s"' % keyword, a)
    return a
486 487 if 'google' in DRIVERS: 488 489 is_jdbc = False
490 491 - class GAEDecimalProperty(gae.Property):
492 """ 493 GAE decimal implementation 494 """ 495 data_type = decimal.Decimal 496
497 - def __init__(self, precision, scale, **kwargs):
498 super(GAEDecimalProperty, self).__init__(self, **kwargs) 499 d = '1.' 500 for x in range(scale): 501 d += '0' 502 self.round = decimal.Decimal(d)
503
504 - def get_value_for_datastore(self, model_instance):
505 value = super(GAEDecimalProperty, self)\ 506 .get_value_for_datastore(model_instance) 507 if value is None or value == '': 508 return None 509 else: 510 return str(value)
511
512 - def make_value_from_datastore(self, value):
513 if value is None or value == '': 514 return None 515 else: 516 return decimal.Decimal(value).quantize(self.round)
517
518 - def validate(self, value):
519 value = super(GAEDecimalProperty, self).validate(value) 520 if value is None or isinstance(value, decimal.Decimal): 521 return value 522 elif isinstance(value, basestring): 523 return decimal.Decimal(value) 524 raise gae.BadValueError("Property %s must be a Decimal or string."\ 525 % self.name)
526
    #TODO Needs more testing
    class NDBDecimalProperty(ndb.StringProperty):
        """
        NDB decimal implementation

        Stores decimal.Decimal values as strings via ndb's base-type hooks,
        quantized to `scale` decimal places when read back.
        """
        # NOTE(review): `precision` is accepted but unused, and
        # ndb.StringProperty.__init__ is never invoked here -- confirm this
        # is intentional before relying on property options (name, indexed...).
        data_type = decimal.Decimal

        def __init__(self, precision, scale, **kwargs):
            # build the quantization template, e.g. scale=2 -> Decimal('1.00')
            d = '1.'
            for x in range(scale):
                d += '0'
            self.round = decimal.Decimal(d)

        def _to_base_type(self, value):
            # serialize to string for storage; empty/None becomes None
            if value is None or value == '':
                return None
            else:
                return str(value)

        def _from_base_type(self, value):
            # rebuild a Decimal quantized to the declared scale
            if value is None or value == '':
                return None
            else:
                return decimal.Decimal(value).quantize(self.round)

        def _validate(self, value):
            # accept Decimal or string; anything else is a TypeError
            if value is None or isinstance(value, decimal.Decimal):
                return value
            elif isinstance(value, basestring):
                return decimal.Decimal(value)
            raise TypeError("Property %s must be a Decimal or string."\
                            % self._name)
559
560 ################################################################################### 561 # class that handles connection pooling (all adapters are derived from this one) 562 ################################################################################### 563 564 -class ConnectionPool(object):
565 566 POOLS = {} 567 check_active_connection = True 568 569 @staticmethod
570 - def set_folder(folder):
572 573 # ## this allows gluon to commit/rollback all dbs in this thread 574
    def close(self, action='commit', really=True):
        """
        Finish work on this connection and release it.

        :param action: 'commit'/'rollback' (looked up on self) or a
            callable invoked with the adapter; falsy skips the step
        :param really: when True the underlying connection is closed;
            forced to False if the connection is recycled into the pool
        """
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                pool.append(self.connection)
                # recycled into the pool, so do not physically close it
                really = False
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        # the adapter no longer owns a connection either way
        self.connection = None
    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment

        Walks every DAL instance registered on the current thread,
        closes its adapter with `action` ('commit'/'rollback' or a
        callable), then clears the per-thread registries. A callable
        `action` is finally invoked once more with None.
        """
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        for db_uid, db_group in dbs:
            for db in db_group:
                if hasattr(db,'_adapter'):
                    db._adapter.close(action)
        # forget every instance registered on this thread
        getattr(THREAD_LOCAL,'db_instances',{}).clear()
        getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
        if callable(action):
            action(None)
        return
606
    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        self.folder = getattr(THREAD_LOCAL,'folder','')

        # presumably normalizes DB-stored-file adapters to a cwd-relative
        # path -- TODO confirm against UseDatabaseStoredFile
        if (os.path.isabs(self.folder) and
            isinstance(self, UseDatabaseStoredFile) and
            self.folder.startswith(os.getcwd())):
            self.folder = os.path.relpath(self.folder, os.getcwd())

        # Creating the folder if it does not exist
        # NOTE(review): deliberately disabled via `if False` -- the folder
        # is required to exist already (see docstring); kept for reference
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)
619
620 - def after_connection_hook(self):
621 """hook for the after_connection parameter""" 622 if callable(self._after_connection): 623 self._after_connection(self) 624 self.after_connection()
625
626 - def after_connection(self):
627 """ this it is supposed to be overloaded by adapters""" 628 pass
629
    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        # already connected: nothing to do
        if getattr(self,'connection', None) != None:
            return
        if f is None:
            f = self.connector

        # if not hasattr(self, "driver") or self.driver is None:
        #     LOGGER.debug("Skipping connection since there's no driver")
        #     return

        if not self.pool_size:
            # pooling disabled: open a fresh connection every time
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection; probe it with a trivial
                    # query and loop again if the server has dropped it
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass
                else:
                    # pool empty: make a brand new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
673
###################################################################################
# metaclass to prepare adapter classes static values
###################################################################################
class AdapterMeta(type):
    """Metaclass to support manipulation of adapter classes.

    At the moment is used to intercept entity_quoting argument passed to DAL.
    """

    def __call__(cls, *args, **kwargs):
        # pop 'entity_quoting' so the adapter __init__ never sees it
        entity_quoting = kwargs.get('entity_quoting', False)
        if 'entity_quoting' in kwargs:
            del kwargs['entity_quoting']

        obj = super(AdapterMeta, cls).__call__(*args, **kwargs)
        if not entity_quoting:
            # quoting disabled: identifiers pass through unchanged and only
            # plain \w+ names are recognized in 'table.field' notation
            quot = obj.QUOTE_TEMPLATE = '%s'
            regex_ent = r'(\w+)'
        else:
            # quoting enabled: keep the adapter's own quote template and
            # accept any (non-greedy) entity name between the quotes
            quot = obj.QUOTE_TEMPLATE
            regex_ent = REGEX_NO_GREEDY_ENTITY_NAME
        # per-instance pattern recognizing '<quoted table>.<quoted field>'
        obj.REGEX_TABLE_DOT_FIELD = re.compile(r'^' + \
                                               quot % regex_ent + \
                                               r'\.' + \
                                               quot % regex_ent + \
                                               r'$')

        return obj
702
703 ################################################################################### 704 # this is a generic adapter that does nothing; all others are derived from this one 705 ################################################################################### 706 707 -class BaseAdapter(ConnectionPool):
708 709 __metaclass__ = AdapterMeta 710 711 native_json = False 712 driver = None 713 driver_name = None 714 drivers = () # list of drivers from which to pick 715 connection = None 716 commit_on_alter_table = False 717 support_distributed_transaction = False 718 uploads_in_blob = False 719 can_select_for_update = True 720 dbpath = None 721 folder = None 722 connector = lambda *args, **kwargs: None # __init__ should override this 723 724 TRUE = 'T' 725 FALSE = 'F' 726 T_SEP = ' ' 727 QUOTE_TEMPLATE = '"%s"' 728 729 730 types = { 731 'boolean': 'CHAR(1)', 732 'string': 'CHAR(%(length)s)', 733 'text': 'TEXT', 734 'json': 'TEXT', 735 'password': 'CHAR(%(length)s)', 736 'blob': 'BLOB', 737 'upload': 'CHAR(%(length)s)', 738 'integer': 'INTEGER', 739 'bigint': 'INTEGER', 740 'float':'DOUBLE', 741 'double': 'DOUBLE', 742 'decimal': 'DOUBLE', 743 'date': 'DATE', 744 'time': 'TIME', 745 'datetime': 'TIMESTAMP', 746 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', 747 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 748 'list:integer': 'TEXT', 749 'list:string': 'TEXT', 750 'list:reference': 'TEXT', 751 # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' 752 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', 753 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 754 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 755 } 756
757 - def isOperationalError(self,exception):
758 if not hasattr(self.driver, "OperationalError"): 759 return None 760 return isinstance(exception, self.driver.OperationalError)
761
762 - def isProgrammingError(self,exception):
763 if not hasattr(self.driver, "ProgrammingError"): 764 return None 765 return isinstance(exception, self.driver.ProgrammingError)
766
767 - def id_query(self, table):
768 pkeys = getattr(table,'_primarykey',None) 769 if pkeys: 770 return table[pkeys[0]] != None 771 else: 772 return table._id != None
773
774 - def adapt(self, obj):
775 return "'%s'" % obj.replace("'", "''")
776
777 - def smart_adapt(self, obj):
778 if isinstance(obj,(int,float)): 779 return str(obj) 780 return self.adapt(str(obj))
781
782 - def file_exists(self, filename):
783 """ 784 to be used ONLY for files that on GAE may not be on filesystem 785 """ 786 return exists(filename)
787
788 - def file_open(self, filename, mode='rb', lock=True):
789 """ 790 to be used ONLY for files that on GAE may not be on filesystem 791 """ 792 if have_portalocker and lock: 793 fileobj = portalocker.LockedFile(filename,mode) 794 else: 795 fileobj = open(filename,mode) 796 return fileobj
797
798 - def file_close(self, fileobj):
799 """ 800 to be used ONLY for files that on GAE may not be on filesystem 801 """ 802 if fileobj: 803 fileobj.close()
804
805 - def file_delete(self, filename):
806 os.unlink(filename)
807
    def find_driver(self,adapter_args,uri=None):
        """
        Select the DB-API driver module for this adapter.

        Resolution order:
          1. an explicit driver in the URI ('dbtype:drivername://...')
          2. adapter_args['driver']
          3. the first entry of self.drivers that imported successfully

        :param adapter_args: adapter options dict (stored on self)
        :param uri: connection string, parsed for a requested driver
        :raises RuntimeError: if the requested driver (or any driver at
            all) is not available
        """
        self.adapter_args = adapter_args
        # a driver may already have been set (e.g. injected by a subclass)
        if getattr(self,'driver',None) != None:
            return
        # driver modules were imported at module level into globals()
        # when available, so presence there means importable
        drivers_available = [driver for driver in self.drivers
                             if driver in globals()]
        if uri:
            items = uri.split('://',1)[0].split(':')
            request_driver = items[1] if len(items)>1 else None
        else:
            request_driver = None
        request_driver = request_driver or adapter_args.get('driver')
        if request_driver:
            if request_driver in drivers_available:
                self.driver_name = request_driver
                self.driver = globals().get(request_driver)
            else:
                raise RuntimeError("driver %s not available" % request_driver)
        elif drivers_available:
            self.driver_name = drivers_available[0]
            self.driver = globals().get(self.driver_name)
        else:
            raise RuntimeError("no driver available %s" % str(self.drivers))
831
    def log(self, message, table=None):
        """ Logs migrations

        It will not log changes if logfile is not specified. Defaults
        to sql.log

        Appends `message` to the migration log file; relative log paths
        are resolved against self.folder. No-op unless a table with a
        migration file (table._dbt) and a working folder are present.
        """

        isabs = None
        logfilename = self.adapter_args.get('logfile','sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)

        if table and table._dbt and writelog and self.folder:
            if isabs:
                table._loggername = logfilename
            else:
                table._loggername = pjoin(self.folder, logfilename)
            # open in append mode so successive migrations accumulate
            logfile = self.file_open(table._loggername, 'a')
            logfile.write(message)
            self.file_close(logfile)
853 854
    def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={},do_connect=True, after_connection=None):
        """
        Base adapter constructor: records connection settings and installs
        no-op Dummy connection/cursor placeholders (real adapters replace
        them when they connect).

        NOTE(review): `credential_decoder`, `driver_args`, `adapter_args`
        and `do_connect` are accepted but unused in this base
        implementation -- presumably consumed by subclasses; the mutable
        `{}` defaults are risky if any subclass mutates them (confirm
        before changing the signature).
        """
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        # stand-in connection/cursor: every attribute is a callable
        # returning [], and lastrowid is fixed at 1
        class Dummy(object):
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()
873 - def sequence_name(self,tablename):
874 return self.QUOTE_TEMPLATE % ('%s_sequence' % tablename)
875
876 - def trigger_name(self,tablename):
877 return '%s_sequence' % tablename
878
879 - def varquote(self,name):
880 return name
881
882 - def create_table(self, table, 883 migrate=True, 884 fake_migrate=False, 885 polymodel=None):
886 db = table._db 887 fields = [] 888 # PostGIS geo fields are added after the table has been created 889 postcreation_fields = [] 890 sql_fields = {} 891 sql_fields_aux = {} 892 TFK = {} 893 tablename = table._tablename 894 sortable = 0 895 types = self.types 896 for field in table: 897 sortable += 1 898 field_name = field.name 899 field_type = field.type 900 if isinstance(field_type,SQLCustomType): 901 ftype = field_type.native or field_type.type 902 elif field_type.startswith('reference'): 903 referenced = field_type[10:].strip() 904 if referenced == '.': 905 referenced = tablename 906 constraint_name = self.constraint_name(tablename, field_name) 907 # if not '.' in referenced \ 908 # and referenced != tablename \ 909 # and hasattr(table,'_primarykey'): 910 # ftype = types['integer'] 911 #else: 912 try: 913 rtable = db[referenced] 914 rfield = rtable._id 915 rfieldname = rfield.name 916 rtablename = referenced 917 except (KeyError, ValueError, AttributeError), e: 918 LOGGER.debug('Error: %s' % e) 919 try: 920 rtablename,rfieldname = referenced.split('.') 921 rtable = db[rtablename] 922 rfield = rtable[rfieldname] 923 except Exception, e: 924 LOGGER.debug('Error: %s' %e) 925 raise KeyError('Cannot resolve reference %s in %s definition' % (referenced, table._tablename)) 926 927 # must be PK reference or unique 928 if getattr(rtable, '_primarykey', None) and rfieldname in rtable._primarykey or \ 929 rfield.unique: 930 ftype = types[rfield.type[:9]] % \ 931 dict(length=rfield.length) 932 # multicolumn primary key reference? 
933 if not rfield.unique and len(rtable._primarykey)>1: 934 # then it has to be a table level FK 935 if rtablename not in TFK: 936 TFK[rtablename] = {} 937 TFK[rtablename][rfieldname] = field_name 938 else: 939 ftype = ftype + \ 940 types['reference FK'] % dict( 941 constraint_name = constraint_name, # should be quoted 942 foreign_key = rtable.sqlsafe + ' (' + rfield.sqlsafe_name + ')', 943 table_name = table.sqlsafe, 944 field_name = field.sqlsafe_name, 945 on_delete_action=field.ondelete) 946 else: 947 # make a guess here for circular references 948 if referenced in db: 949 id_fieldname = db[referenced]._id.sqlsafe_name 950 elif referenced == tablename: 951 id_fieldname = table._id.sqlsafe_name 952 else: #make a guess 953 id_fieldname = self.QUOTE_TEMPLATE % 'id' 954 #gotcha: the referenced table must be defined before 955 #the referencing one to be able to create the table 956 #Also if it's not recommended, we can still support 957 #references to tablenames without rname to make 958 #migrations and model relationship work also if tables 959 #are not defined in order 960 if referenced == tablename: 961 real_referenced = db[referenced].sqlsafe 962 else: 963 real_referenced = (referenced in db 964 and db[referenced].sqlsafe 965 or referenced) 966 rfield = db[referenced]._id 967 ftype = types[field_type[:9]] % dict( 968 index_name = self.QUOTE_TEMPLATE % (field_name+'__idx'), 969 field_name = field.sqlsafe_name, 970 constraint_name = self.QUOTE_TEMPLATE % constraint_name, 971 foreign_key = '%s (%s)' % (real_referenced, rfield.sqlsafe_name), 972 on_delete_action=field.ondelete) 973 elif field_type.startswith('list:reference'): 974 ftype = types[field_type[:14]] 975 elif field_type.startswith('decimal'): 976 precision, scale = map(int,field_type[8:-1].split(',')) 977 ftype = types[field_type[:7]] % \ 978 dict(precision=precision,scale=scale) 979 elif field_type.startswith('geo'): 980 if not hasattr(self,'srid'): 981 raise RuntimeError('Adapter does not support 
geometry') 982 srid = self.srid 983 geotype, parms = field_type[:-1].split('(') 984 if not geotype in types: 985 raise SyntaxError( 986 'Field: unknown field type: %s for %s' \ 987 % (field_type, field_name)) 988 ftype = types[geotype] 989 if self.dbengine == 'postgres' and geotype == 'geometry': 990 # parameters: schema, srid, dimension 991 dimension = 2 # GIS.dimension ??? 992 parms = parms.split(',') 993 if len(parms) == 3: 994 schema, srid, dimension = parms 995 elif len(parms) == 2: 996 schema, srid = parms 997 else: 998 schema = parms[0] 999 ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype] 1000 ftype = ftype % dict(schema=schema, 1001 tablename=tablename, 1002 fieldname=field_name, srid=srid, 1003 dimension=dimension) 1004 postcreation_fields.append(ftype) 1005 elif not field_type in types: 1006 raise SyntaxError('Field: unknown field type: %s for %s' % \ 1007 (field_type, field_name)) 1008 else: 1009 ftype = types[field_type]\ 1010 % dict(length=field.length) 1011 if not field_type.startswith('id') and \ 1012 not field_type.startswith('reference'): 1013 if field.notnull: 1014 ftype += ' NOT NULL' 1015 else: 1016 ftype += self.ALLOW_NULL() 1017 if field.unique: 1018 ftype += ' UNIQUE' 1019 if field.custom_qualifier: 1020 ftype += ' %s' % field.custom_qualifier 1021 1022 # add to list of fields 1023 sql_fields[field_name] = dict( 1024 length=field.length, 1025 unique=field.unique, 1026 notnull=field.notnull, 1027 sortable=sortable, 1028 type=str(field_type), 1029 sql=ftype) 1030 1031 if field.notnull and not field.default is None: 1032 # Caveat: sql_fields and sql_fields_aux 1033 # differ for default values. 1034 # sql_fields is used to trigger migrations and sql_fields_aux 1035 # is used for create tables. 1036 # The reason is that we do not want to trigger 1037 # a migration simply because a default value changes. 
1038 not_null = self.NOT_NULL(field.default, field_type) 1039 ftype = ftype.replace('NOT NULL', not_null) 1040 sql_fields_aux[field_name] = dict(sql=ftype) 1041 # Postgres - PostGIS: 1042 # geometry fields are added after the table has been created, not now 1043 if not (self.dbengine == 'postgres' and \ 1044 field_type.startswith('geom')): 1045 fields.append('%s %s' % (field.sqlsafe_name, ftype)) 1046 other = ';' 1047 1048 # backend-specific extensions to fields 1049 if self.dbengine == 'mysql': 1050 if not hasattr(table, "_primarykey"): 1051 fields.append('PRIMARY KEY (%s)' % (self.QUOTE_TEMPLATE % table._id.name)) 1052 engine = self.adapter_args.get('engine','InnoDB') 1053 other = ' ENGINE=%s CHARACTER SET utf8;' % engine 1054 1055 fields = ',\n '.join(fields) 1056 for rtablename in TFK: 1057 rfields = TFK[rtablename] 1058 pkeys = [self.QUOTE_TEMPLATE % pk for pk in db[rtablename]._primarykey] 1059 fkeys = [self.QUOTE_TEMPLATE % rfields[k].name for k in pkeys ] 1060 fields = fields + ',\n ' + \ 1061 types['reference TFK'] % dict( 1062 table_name = table.sqlsafe, 1063 field_name=', '.join(fkeys), 1064 foreign_table = table.sqlsafe, 1065 foreign_key = ', '.join(pkeys), 1066 on_delete_action = field.ondelete) 1067 1068 table_rname = table.sqlsafe 1069 1070 if getattr(table,'_primarykey',None): 1071 query = "CREATE TABLE %s(\n %s,\n %s) %s" % \ 1072 (table.sqlsafe, fields, 1073 self.PRIMARY_KEY(', '.join([self.QUOTE_TEMPLATE % pk for pk in table._primarykey])),other) 1074 else: 1075 query = "CREATE TABLE %s(\n %s\n)%s" % \ 1076 (table.sqlsafe, fields, other) 1077 1078 if self.uri.startswith('sqlite:///') \ 1079 or self.uri.startswith('spatialite:///'): 1080 path_encoding = sys.getfilesystemencoding() \ 1081 or locale.getdefaultlocale()[1] or 'utf8' 1082 dbpath = self.uri[9:self.uri.rfind('/')]\ 1083 .decode('utf8').encode(path_encoding) 1084 else: 1085 dbpath = self.folder 1086 1087 if not migrate: 1088 return query 1089 elif self.uri.startswith('sqlite:memory')\ 
1090 or self.uri.startswith('spatialite:memory'): 1091 table._dbt = None 1092 elif isinstance(migrate, str): 1093 table._dbt = pjoin(dbpath, migrate) 1094 else: 1095 table._dbt = pjoin( 1096 dbpath, '%s_%s.table' % (table._db._uri_hash, tablename)) 1097 1098 if not table._dbt or not self.file_exists(table._dbt): 1099 if table._dbt: 1100 self.log('timestamp: %s\n%s\n' 1101 % (datetime.datetime.today().isoformat(), 1102 query), table) 1103 if not fake_migrate: 1104 self.create_sequence_and_triggers(query,table) 1105 table._db.commit() 1106 # Postgres geom fields are added now, 1107 # after the table has been created 1108 for query in postcreation_fields: 1109 self.execute(query) 1110 table._db.commit() 1111 if table._dbt: 1112 tfile = self.file_open(table._dbt, 'w') 1113 pickle.dump(sql_fields, tfile) 1114 self.file_close(tfile) 1115 if fake_migrate: 1116 self.log('faked!\n', table) 1117 else: 1118 self.log('success!\n', table) 1119 else: 1120 tfile = self.file_open(table._dbt, 'r') 1121 try: 1122 sql_fields_old = pickle.load(tfile) 1123 except EOFError: 1124 self.file_close(tfile) 1125 raise RuntimeError('File %s appears corrupted' % table._dbt) 1126 self.file_close(tfile) 1127 if sql_fields != sql_fields_old: 1128 self.migrate_table( 1129 table, 1130 sql_fields, sql_fields_old, 1131 sql_fields_aux, None, 1132 fake_migrate=fake_migrate 1133 ) 1134 return query
1135
def migrate_table(
    self,
    table,
    sql_fields,
    sql_fields_old,
    sql_fields_aux,
    logfile,
    fake_migrate=False,
    ):
    """Diff the current field definitions against the pickled metadata and
    emit/execute the ALTER TABLE statements needed to reconcile them.

    :param table: the Table being migrated
    :param sql_fields: metadata for the model as currently defined
    :param sql_fields_old: metadata loaded from the .table file
    :param sql_fields_aux: like sql_fields but with default values baked in
        (used for the actual DDL so default changes don't trigger migrations)
    :param logfile: deprecated; logging goes through self.log
    :param fake_migrate: record the migration without executing any SQL

    Fix applied: the DropGeometryColumn statement was built as
    ``"..." + "..." % dict(...)`` — since ``%`` binds tighter than ``+``,
    only the second fragment was formatted and ``%(schema)s`` was emitted
    literally.  The concatenation is now parenthesized before formatting.
    """
    # logfile is deprecated (moved to adapter.log method)
    db = table._db
    db._migrated.append(table._tablename)
    tablename = table._tablename
    def fix(item):
        # normalize metadata entries: old files may store a bare sql string
        k,v=item
        if not isinstance(v,dict):
            v=dict(type='unknown',sql=v)
        if self.ignore_field_case is not True: return k, v
        return k.lower(),v
    # make sure all field names are lower case to avoid
    # migrations because of case change
    sql_fields = dict(map(fix,sql_fields.iteritems()))
    sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
    sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
    if db._debug:
        logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))

    # union of new and old field names, new ones first
    keys = sql_fields.keys()
    for key in sql_fields_old:
        if not key in keys:
            keys.append(key)
    new_add = self.concat_add(tablename)

    metadata_change = False
    sql_fields_current = copy.copy(sql_fields_old)
    for key in keys:
        query = None
        if not key in sql_fields_old:
            # field added since last migration
            sql_fields_current[key] = sql_fields[key]
            if self.dbengine in ('postgres',) and \
               sql_fields[key]['type'].startswith('geometry'):
                # 'sql' == ftype in sql: geometry columns are added via
                # the AddGeometryColumn SELECT stored in the metadata
                query = [ sql_fields[key]['sql'] ]
            else:
                query = ['ALTER TABLE %s ADD %s %s;' % \
                         (table.sqlsafe, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
            metadata_change = True
        elif self.dbengine in ('sqlite', 'spatialite'):
            # sqlite cannot drop/alter columns: only update the metadata
            if key in sql_fields:
                sql_fields_current[key] = sql_fields[key]
            metadata_change = True
        elif not key in sql_fields:
            # field removed since last migration
            del sql_fields_current[key]
            ftype = sql_fields_old[key]['type']
            if (self.dbengine in ('postgres',) and
                ftype.startswith('geometry')):
                geotype, parms = ftype[:-1].split('(')
                schema = parms.split(',')[0]
                # parenthesize before %: '+' would otherwise bind looser
                # and leave the first fragment unformatted
                query = [ ("SELECT DropGeometryColumn ('%(schema)s', "
                           "'%(table)s', '%(field)s');") %
                          dict(schema=schema, table=tablename, field=key,) ]
            elif self.dbengine in ('firebird',):
                query = ['ALTER TABLE %s DROP %s;' %
                         (self.QUOTE_TEMPLATE % tablename, self.QUOTE_TEMPLATE % key)]
            else:
                query = ['ALTER TABLE %s DROP COLUMN %s;' %
                         (self.QUOTE_TEMPLATE % tablename, self.QUOTE_TEMPLATE % key)]
            metadata_change = True
        elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
              and not (key in table.fields and
                       isinstance(table[key].type, SQLCustomType)) \
              and not sql_fields[key]['type'].startswith('reference')\
              and not sql_fields[key]['type'].startswith('double')\
              and not sql_fields[key]['type'].startswith('id'):
            # field type changed: copy data through a __tmp column since
            # not all engines support ALTER COLUMN TYPE
            sql_fields_current[key] = sql_fields[key]
            t = tablename
            tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
            if self.dbengine in ('firebird',):
                drop_expr = 'ALTER TABLE %s DROP %s;'
            else:
                drop_expr = 'ALTER TABLE %s DROP COLUMN %s;'
            key_tmp = key + '__tmp'
            query = ['ALTER TABLE %s ADD %s %s;' % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp, tt),
                     'UPDATE %s SET %s=%s;' %
                     (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp, self.QUOTE_TEMPLATE % key),
                     drop_expr % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key),
                     'ALTER TABLE %s ADD %s %s;' %
                     (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key, tt),
                     'UPDATE %s SET %s=%s;' %
                     (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key, self.QUOTE_TEMPLATE % key_tmp),
                     drop_expr % (self.QUOTE_TEMPLATE % t, self.QUOTE_TEMPLATE % key_tmp)]
            metadata_change = True
        elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
            # declared type changed but generated SQL did not: metadata only
            sql_fields_current[key] = sql_fields[key]
            metadata_change = True

        if query:
            self.log('timestamp: %s\n'
                     % datetime.datetime.today().isoformat(), table)
            db['_lastsql'] = '\n'.join(query)
            for sub_query in query:
                self.log(sub_query + '\n', table)
                if fake_migrate:
                    if db._adapter.commit_on_alter_table:
                        self.save_dbt(table,sql_fields_current)
                    self.log('faked!\n', table)
                else:
                    self.execute(sub_query)
                    # Caveat: mysql, oracle and firebird
                    # do not allow multiple alter table
                    # in one transaction so we must commit
                    # partial transactions and
                    # update table._dbt after alter table.
                    if db._adapter.commit_on_alter_table:
                        db.commit()
                        self.save_dbt(table,sql_fields_current)
                        self.log('success!\n', table)

        elif metadata_change:
            self.save_dbt(table,sql_fields_current)

    if metadata_change and not (query and db._adapter.commit_on_alter_table):
        db.commit()
        self.save_dbt(table,sql_fields_current)
        self.log('success!\n', table)
def save_dbt(self, table, sql_fields_current):
    """Persist the table's migration metadata to its .table file."""
    handle = self.file_open(table._dbt, 'w')
    pickle.dump(sql_fields_current, handle)
    self.file_close(handle)
1268
def LOWER(self, first):
    """Render a SQL LOWER() call around the expanded expression."""
    expanded = self.expand(first)
    return 'LOWER(%s)' % expanded
1271
def UPPER(self, first):
    """Render a SQL UPPER() call around the expanded expression."""
    expanded = self.expand(first)
    return 'UPPER(%s)' % expanded
1274
def COUNT(self, first, distinct=None):
    """Render COUNT(expr), or COUNT(DISTINCT expr) when distinct is truthy."""
    expanded = self.expand(first)
    if distinct:
        return 'COUNT(DISTINCT %s)' % expanded
    return 'COUNT(%s)' % expanded
1278
def EXTRACT(self, first, what):
    """Render EXTRACT(<what> FROM expr) for date/time part extraction."""
    return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
1281
def EPOCH(self, first):
    """Unix-epoch extraction, delegated to the generic EXTRACT."""
    return self.EXTRACT(first, 'epoch')
1284
def LENGTH(self, first):
    """Render a SQL LENGTH() call around the expanded expression."""
    expanded = self.expand(first)
    return "LENGTH(%s)" % expanded
1287
def AGGREGATE(self, first, what):
    """Render an arbitrary aggregate call: <what>(expr)."""
    return "%s(%s)" % (what, self.expand(first))
1290
def JOIN(self):
    """SQL keyword used for inner joins; adapters may override."""
    return 'JOIN'
1293
def LEFT_JOIN(self):
    """SQL keyword used for left outer joins; adapters may override."""
    return 'LEFT JOIN'
1296
def RANDOM(self):
    """SQL expression producing a random value, for '<random>' ordering."""
    return 'Random()'
1299
def NOT_NULL(self, default, field_type):
    """Render the NOT NULL clause with the field's represented default."""
    rendered_default = self.represent(default, field_type)
    return 'NOT NULL DEFAULT %s' % rendered_default
1302
def COALESCE(self, first, second):
    """Render COALESCE(first, *second); second is an iterable of fallbacks."""
    parts = [self.expand(first)]
    parts.extend(self.expand(e) for e in second)
    return 'COALESCE(%s)' % ','.join(parts)
1306
def COALESCE_ZERO(self, first):
    """Render COALESCE(expr, 0), substituting zero for NULL."""
    return 'COALESCE(%s,0)' % self.expand(first)
1309
def RAW(self, first):
    """Pass the value through verbatim — no quoting or expansion."""
    return first
1312
def ALLOW_NULL(self):
    """Clause appended to nullable columns; empty in ANSI SQL (NULL is default)."""
    return ''
1315
def SUBSTRING(self, field, parameters):
    """Render SUBSTR(expr, start, length); parameters is a (start, length) pair."""
    start, length = parameters[0], parameters[1]
    return 'SUBSTR(%s,%s,%s)' % (self.expand(field), start, length)
1318
def PRIMARY_KEY(self, key):
    """Render the PRIMARY KEY table constraint for the given column list."""
    return 'PRIMARY KEY(%s)' % key
1321
1322 - def _drop(self, table, mode):
1323 return ['DROP TABLE %s;' % table.sqlsafe]
1324
def drop(self, table, mode=''):
    """Drop the table from the database and unregister it from the DAL.

    Executes the statements from _drop, commits, removes the table from
    the db namespace and tables list, severs references from other
    tables, and deletes the migration .table file if one exists.
    """
    db = table._db
    queries = self._drop(table, mode)
    for query in queries:
        # only log when migrations are tracked for this table
        if table._dbt:
            self.log(query + '\n', table)
        self.execute(query)
    db.commit()
    # unregister the table from the DAL instance
    del db[table._tablename]
    del db.tables[db.tables.index(table._tablename)]
    db._remove_references_to(table)
    if table._dbt:
        # remove the migration metadata file as well
        self.file_delete(table._dbt)
        self.log('success!\n', table)
1339
1340 - def _insert(self, table, fields):
1341 table_rname = table.sqlsafe 1342 if fields: 1343 keys = ','.join(f.sqlsafe_name for f, v in fields) 1344 values = ','.join(self.expand(v, f.type) for f, v in fields) 1345 return 'INSERT INTO %s(%s) VALUES (%s);' % (table_rname, keys, values) 1346 else: 1347 return self._insert_empty(table)
1348
1349 - def _insert_empty(self, table):
1350 return 'INSERT INTO %s DEFAULT VALUES;' % (table.sqlsafe)
1351
def insert(self, table, fields):
    """Insert a record and return its identifier.

    Returns a dict of key values for tables with a compound _primarykey,
    a Reference (int subclass bound to the table) for auto-id tables, or
    the raw lastrowid when it is not an integer.  On failure, delegates
    to table._on_insert_error if defined, otherwise re-raises.
    """
    query = self._insert(table,fields)
    try:
        self.execute(query)
    except Exception:
        e = sys.exc_info()[1]
        if hasattr(table,'_on_insert_error'):
            return table._on_insert_error(table,fields,e)
        raise e
    if hasattr(table, '_primarykey'):
        # return the explicitly supplied primary-key values, if any
        mydict = dict([(k[0].name, k[1]) for k in fields if k[0].name in table._primarykey])
        if mydict != {}:
            return mydict
    id = self.lastrowid(table)
    if hasattr(table, '_primarykey') and len(table._primarykey) == 1:
        id = {table._primarykey[0]: id}
    # non-integer ids (e.g. uuid-style) cannot be wrapped in a Reference
    if not isinstance(id, (int, long)):
        return id
    rid = Reference(id)
    (rid._table, rid._record) = (table, None)
    return rid
1373
def bulk_insert(self, table, items):
    """Insert each item in turn; returns the list of per-row insert results."""
    results = []
    for item in items:
        results.append(self.insert(table, item))
    return results
1376
def NOT(self, first):
    """Render logical negation: (NOT expr)."""
    return '(NOT %s)' % self.expand(first)
1379
def AND(self, first, second):
    """Render logical conjunction of two expanded expressions."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '(%s AND %s)' % (lhs, rhs)
1382
def OR(self, first, second):
    """Render logical disjunction of two expanded expressions."""
    lhs = self.expand(first)
    rhs = self.expand(second)
    return '(%s OR %s)' % (lhs, rhs)
1385
def BELONGS(self, first, second):
    """Render an SQL IN test.

    second may be a nested-select string (its trailing ';' is stripped)
    or an iterable of values; an empty iterable yields the always-false
    predicate (1=0).
    """
    if isinstance(second, str):
        # nested select: drop the statement terminator of the subquery
        return '(%s IN (%s))' % (self.expand(first), second[:-1])
    if not second:
        return '(1=0)'
    rendered = ','.join(self.expand(v, first.type) for v in second)
    return '(%s IN (%s))' % (self.expand(first), rendered)
1393
def REGEXP(self, first, second):
    """Regular-expression match operator; not supported by the base adapter."""
    raise NotImplementedError
1397
def LIKE(self, first, second):
    """Case-sensitive LIKE operator; not supported by the base adapter."""
    raise NotImplementedError
1401
def ILIKE(self, first, second):
    """Case-insensitive like operator (plain LIKE in the base adapter)."""
    lhs = self.expand(first)
    rhs = self.expand(second, 'string')
    return '(%s LIKE %s)' % (lhs, rhs)
1406
def STARTSWITH(self, first, second):
    """Prefix match via LIKE: appends the '%' wildcard to the pattern."""
    pattern = self.expand(second + '%', 'string')
    return '(%s LIKE %s)' % (self.expand(first), pattern)
1410
def ENDSWITH(self, first, second):
    """Suffix match via LIKE: prepends the '%' wildcard to the pattern."""
    pattern = self.expand('%' + second, 'string')
    return '(%s LIKE %s)' % (self.expand(first), pattern)
1414
def CONTAINS(self,first,second,case_sensitive=False):
    """Render a substring / list-membership test via (I)LIKE.

    For string/text/json fields the needle is wrapped in '%...%' with
    literal '%' escaped as '%%'.  For list: fields, items are stored
    pipe-delimited, so the needle is wrapped in '%|...|%' with '|'
    escaped as '||'.  Expression needles are rebuilt with CONCAT/REPLACE
    so the escaping happens in SQL.
    """
    if first.type in ('string','text', 'json'):
        if isinstance(second,Expression):
            # escape '%' inside the SQL expression, then wrap in wildcards
            second = Expression(None,self.CONCAT('%',Expression(
                        None,self.REPLACE(second,('%','%%'))),'%'))
        else:
            second = '%'+str(second).replace('%','%%')+'%'
    elif first.type.startswith('list:'):
        if isinstance(second,Expression):
            # escape '%' then '|', and wrap in the list-item delimiters
            second = Expression(None,self.CONCAT(
                    '%|',Expression(None,self.REPLACE(
                            Expression(None,self.REPLACE(
                                    second,('%','%%'))),('|','||'))),'|%'))
        else:
            second = '%|'+str(second).replace('%','%%')\
                .replace('|','||')+'|%'
    # pick the case-sensitive or insensitive like operator
    op = case_sensitive and self.LIKE or self.ILIKE
    return op(first,second)
1433
def EQ(self, first, second=None):
    """Render equality; None on the right becomes an IS NULL test."""
    lhs = self.expand(first)
    if second is None:
        return '(%s IS NULL)' % lhs
    return '(%s = %s)' % (lhs, self.expand(second, first.type))
1439
def NE(self, first, second=None):
    """Render inequality; None on the right becomes an IS NOT NULL test."""
    lhs = self.expand(first)
    if second is None:
        return '(%s IS NOT NULL)' % lhs
    return '(%s <> %s)' % (lhs, self.expand(second, first.type))
1445
def LT(self, first, second=None):
    """Render a less-than comparison; None is not orderable and raises."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    return '(%s < %s)' % (self.expand(first),
                          self.expand(second, first.type))
1451
def LE(self, first, second=None):
    """Render a less-or-equal comparison; None is not orderable and raises."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    return '(%s <= %s)' % (self.expand(first),
                           self.expand(second, first.type))
1457
def GT(self, first, second=None):
    """Render a greater-than comparison; None is not orderable and raises."""
    if second is None:
        raise RuntimeError("Cannot compare %s > None" % first)
    return '(%s > %s)' % (self.expand(first),
                          self.expand(second, first.type))
1463
def GE(self, first, second=None):
    """Render a greater-or-equal comparison; None is not orderable and raises."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    return '(%s >= %s)' % (self.expand(first),
                           self.expand(second, first.type))
1469
def is_numerical_type(self, ftype):
    """True for field types that support arithmetic (used by ADD for + vs ||)."""
    if ftype in ('integer', 'boolean', 'double', 'bigint'):
        return True
    return ftype.startswith('decimal')
1473
def REPLACE(self, first, second_third):
    """Render REPLACE(haystack, needle, replacement).

    :param first: the expression searched in
    :param second_third: a (needle, replacement) pair

    Note: the original signature used Python-2-only tuple parameter
    unpacking ``(second, third)`` (removed by PEP 3113).  Callers always
    pass the pair positionally, so unpacking inside the body is fully
    backward compatible and forward compatible with Python 3.
    """
    second, third = second_third
    return 'REPLACE(%s,%s,%s)' % (self.expand(first, 'string'),
                                  self.expand(second, 'string'),
                                  self.expand(third, 'string'))
1478
def CONCAT(self, *items):
    """Render string concatenation with the ANSI '||' operator."""
    rendered = [self.expand(item, 'string') for item in items]
    return '(%s)' % ' || '.join(rendered)
1481
def ADD(self, first, second):
    """Render '+' for numerical operands, string concatenation otherwise.

    NOTE(review): the ``isinstance(first.type, Field)`` test checks whether
    the *type* attribute is a Field instance — presumably to treat
    field-typed expressions as numeric; intent unconfirmed from here.
    """
    numeric = self.is_numerical_type(first.type) or isinstance(first.type, Field)
    if not numeric:
        return self.CONCAT(first, second)
    return '(%s + %s)' % (self.expand(first),
                          self.expand(second, first.type))
1488
def SUB(self, first, second):
    """Render arithmetic subtraction of two expanded expressions."""
    return '(%s - %s)' % (self.expand(first),
                          self.expand(second, first.type))
1492
def MUL(self, first, second):
    """Render arithmetic multiplication of two expanded expressions."""
    return '(%s * %s)' % (self.expand(first),
                          self.expand(second, first.type))
1496
def DIV(self, first, second):
    """Render arithmetic division of two expanded expressions."""
    return '(%s / %s)' % (self.expand(first),
                          self.expand(second, first.type))
1500
def MOD(self, first, second):
    """Render the modulo operator ('%' escaped as '%%' in the format)."""
    return '(%s %% %s)' % (self.expand(first),
                           self.expand(second, first.type))
1504
def AS(self, first, second):
    """Render a column/table alias: expr AS name (name is used verbatim)."""
    return '%s AS %s' % (self.expand(first), second)
1507
def ON(self, first, second):
    """Render 'table ON condition' for a join clause, applying any
    common filters registered for the joined table."""
    aliased_table = self.table_alias(first)
    if use_common_filters(second):
        second = self.common_filter(second, [first._tablename])
    return '%s ON %s' % (self.expand(aliased_table), self.expand(second))
1513
def INVERT(self, first):
    """Render descending sort order for an ORDER BY term."""
    return '%s DESC' % self.expand(first)
1516
def COMMA(self, first, second):
    """Join two expanded expressions with a comma (e.g. orderby lists)."""
    return '%s, %s' % (self.expand(first), self.expand(second))
1519
def CAST(self, first, second):
    """Render CAST(expr AS type); both arguments are used verbatim."""
    return 'CAST(%s AS %s)' % (first, second)
1522
def expand(self, expression, field_type=None, colnames=False):
    """Recursively render an expression tree into SQL text.

    Fields become quoted table.column references (honoring _rname/_ot);
    Expression/Query nodes dispatch to their operator; bare values are
    represented according to field_type; colnames=True forces plain
    quoted names (used for result-column labels).
    """
    if isinstance(expression, Field):
        et = expression.table
        if not colnames:
            # prefer the alias (_ot) or the real name (_rname) when present
            table_rname = et._ot and self.QUOTE_TEMPLATE % et._tablename or et._rname or self.QUOTE_TEMPLATE % et._tablename
            out = '%s.%s' % (table_rname, expression._rname or (self.QUOTE_TEMPLATE % (expression.name)))
        else:
            out = '%s.%s' % (self.QUOTE_TEMPLATE % et._tablename, self.QUOTE_TEMPLATE % expression.name)
        if field_type == 'string' and not expression.type in (
            'string','text','json','password'):
            # a string is expected: cast non-text columns to text
            out = self.CAST(out, self.types['text'])
        return out
    elif isinstance(expression, (Expression, Query)):
        first = expression.first
        second = expression.second
        op = expression.op
        optional_args = expression.optional_args or {}
        if not second is None:
            out = op(first, second, **optional_args)
        elif not first is None:
            out = op(first,**optional_args)
        elif isinstance(op, str):
            # raw SQL snippet stored as the operator
            if op.endswith(';'):
                op=op[:-1]
            out = '(%s)' % op
        else:
            out = op()
        return out
    elif field_type:
        # bare value with a known field type: delegate to represent
        return str(self.represent(expression,field_type))
    elif isinstance(expression,(list,tuple)):
        return ','.join(self.represent(item,field_type) \
                        for item in expression)
    elif isinstance(expression, bool):
        return '1' if expression else '0'
    else:
        return str(expression)
1560
def table_alias(self, tbl):
    """Accept a Table or a table name and return its SQL-safe aliased form."""
    target = tbl if isinstance(tbl, Table) else self.db[tbl]
    return target.sqlsafe_alias
1565 1566
def alias(self, table, alias):
    """
    Given a table object, makes a new table object
    with alias name.

    The copy is shallow: fields are individually copied and re-pointed
    at the aliased table, and the alias is registered on the DAL so it
    can be referenced in queries.
    """
    other = copy.copy(table)
    # remember the original (sqlsafe) name so ON clauses can render
    # 'original AS alias'
    other['_ot'] = other._ot or other.sqlsafe
    other['ALL'] = SQLALL(other)
    other['_tablename'] = alias
    for fieldname in other.fields:
        # each field must be its own copy, bound to the aliased table
        other[fieldname] = copy.copy(other[fieldname])
        other[fieldname]._tablename = alias
        other[fieldname].tablename = alias
        other[fieldname].table = other
    table._db[alias] = other
    return other
1583
1584 - def _truncate(self, table, mode=''):
1585 return ['TRUNCATE TABLE %s %s;' % (table.sqlsafe, mode or '')]
1586
def truncate(self, table, mode= ' '):
    """Empty the table, logging each statement and a final success marker.

    Fix applied: removed the vestigial ``try/finally: pass`` wrapper and
    its stale comment about logfile helper functions — per-table logfiles
    were replaced by self.log, so the finally block had become a no-op.
    Exceptions still propagate exactly as before.
    """
    queries = table._db._adapter._truncate(table, mode)
    for query in queries:
        self.log(query + '\n', table)
        self.execute(query)
    self.log('success!\n', table)
1597
1598 - def _update(self, tablename, query, fields):
1599 if query: 1600 if use_common_filters(query): 1601 query = self.common_filter(query, [tablename]) 1602 sql_w = ' WHERE ' + self.expand(query) 1603 else: 1604 sql_w = '' 1605 sql_v = ','.join(['%s=%s' % (field.sqlsafe_name, 1606 self.expand(value, field.type)) \ 1607 for (field, value) in fields]) 1608 tablename = self.db[tablename].sqlsafe 1609 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1610
def update(self, tablename, query, fields):
    """Execute an UPDATE and return the affected-row count (or None).

    On failure, delegates to table._on_update_error if defined,
    otherwise re-raises the original exception.
    """
    sql = self._update(tablename, query, fields)
    try:
        self.execute(sql)
    except Exception:
        e = sys.exc_info()[1]
        table = self.db[tablename]
        if hasattr(table,'_on_update_error'):
            return table._on_update_error(table,query,fields,e)
        raise e
    try:
        # not all drivers expose rowcount
        return self.cursor.rowcount
    except:
        return None
1625
1626 - def _delete(self, tablename, query):
1627 if query: 1628 if use_common_filters(query): 1629 query = self.common_filter(query, [tablename]) 1630 sql_w = ' WHERE ' + self.expand(query) 1631 else: 1632 sql_w = '' 1633 tablename = self.db[tablename].sqlsafe 1634 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1635
def delete(self, tablename, query):
    """Execute a DELETE and return the affected-row count (or None).

    SQLite/SpatiaLite do not enforce ON DELETE CASCADE for web2py
    references, so the ids to be deleted are captured first and any
    CASCADE-referencing rows are deleted afterwards in Python.
    """
    sql = self._delete(tablename, query)
    ### special code to handle CASCADE in SQLite & SpatiaLite
    db = self.db
    table = db[tablename]
    if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
        # snapshot the ids before they disappear
        deleted = [x[table._id.name] for x in db(query).select(table._id)]
    ### end special code to handle CASCADE in SQLite & SpatiaLite
    self.execute(sql)
    try:
        counter = self.cursor.rowcount
    except:
        counter = None
    ### special code to handle CASCADE in SQLite & SpatiaLite
    if self.dbengine in ('sqlite', 'spatialite') and counter:
        for field in table._referenced_by:
            if field.type=='reference '+table._tablename \
                    and field.ondelete=='CASCADE':
                # recursive delete of dependent rows
                db(field.belongs(deleted)).delete()
    ### end special code to handle CASCADE in SQLite & SpatiaLite
    return counter
1657
def get_table(self, query):
    """Return the single table name referenced by query; raise otherwise."""
    names = self.tables(query)
    if len(names) == 1:
        return names[0]
    if not names:
        raise RuntimeError("No table selected")
    raise RuntimeError("Too many tables selected")
1666
def expand_all(self, fields, tablenames):
    """Normalize a select field list into Field/Expression objects.

    SQLALL items expand to all fields of their table; 'table.field'
    strings are resolved to Field objects; other strings become raw
    Expressions.  If no fields are given, all fields of the requested
    tables are selected.
    """
    db = self.db
    new_fields = []
    append = new_fields.append
    for item in fields:
        if isinstance(item,SQLALL):
            new_fields += item._table
        elif isinstance(item,str):
            m = self.REGEX_TABLE_DOT_FIELD.match(item)
            if m:
                tablename,fieldname = m.groups()
                append(db[tablename][fieldname])
            else:
                # raw SQL snippet: wrap as a constant expression
                append(Expression(db,lambda item=item:item))
        else:
            append(item)
    # ## if no fields specified take them all from the requested tables
    if not new_fields:
        for table in tablenames:
            for field in db[table]:
                append(field)
    return new_fields
1689
def _select(self, query, fields, attributes):
    """Build the full SELECT statement for the given query and field list.

    Handles DISTINCT, inner and left joins (including mixed), common
    filters, GROUP BY/HAVING, ORDER BY (with '<random>' support), an
    implicit ORDER BY for LIMIT queries, and FOR UPDATE.
    """
    tables = self.tables
    for key in set(attributes.keys())-SELECT_ARGS:
        raise SyntaxError('invalid select attribute: %s' % key)
    args_get = attributes.get
    tablenames = tables(query)
    tablenames_for_common_filters = tablenames
    for field in fields:
        if isinstance(field, basestring):
            m = self.REGEX_TABLE_DOT_FIELD.match(field)
            if m:
                tn,fn = m.groups()
                field = self.db[tn][fn]
        # collect any tables referenced only by the selected fields
        for tablename in tables(field):
            if not tablename in tablenames:
                tablenames.append(tablename)

    if len(tablenames) < 1:
        raise SyntaxError('Set: no tables selected')
    def colexpand(field):
        return self.expand(field, colnames=True)
    # labels used later by parse() to name result columns
    self._colnames = map(colexpand, fields)
    def geoexpand(field):
        # geometry columns are selected as their WKT representation
        if isinstance(field.type,str) and field.type.startswith('geo') and isinstance(field, Field):
            field = field.st_astext()
        return self.expand(field)
    sql_f = ', '.join(map(geoexpand, fields))
    sql_o = ''
    sql_s = ''
    left = args_get('left', False)
    inner_join = args_get('join', False)
    distinct = args_get('distinct', False)
    groupby = args_get('groupby', False)
    orderby = args_get('orderby', False)
    having = args_get('having', False)
    limitby = args_get('limitby', False)
    orderby_on_limitby = args_get('orderby_on_limitby', True)
    for_update = args_get('for_update', False)
    if self.can_select_for_update is False and for_update is True:
        raise SyntaxError('invalid select attribute: for_update')
    if distinct is True:
        sql_s += 'DISTINCT'
    elif distinct:
        sql_s += 'DISTINCT ON (%s)' % distinct
    if inner_join:
        icommand = self.JOIN()
        if not isinstance(inner_join, (tuple, list)):
            inner_join = [inner_join]
        # plain tables vs ON-expressions among the join arguments
        ijoint = [t._tablename for t in inner_join
                  if not isinstance(t,Expression)]
        ijoinon = [t for t in inner_join if isinstance(t, Expression)]
        itables_to_merge={} #issue 490
        [itables_to_merge.update(
                dict.fromkeys(tables(t))) for t in ijoinon]
        ijoinont = [t.first._tablename for t in ijoinon]
        [itables_to_merge.pop(t) for t in ijoinont
         if t in itables_to_merge] #issue 490
        iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
        iexcluded = [t for t in tablenames
                     if not t in iimportant_tablenames]
    if left:
        join = attributes['left']
        command = self.LEFT_JOIN()
        if not isinstance(join, (tuple, list)):
            join = [join]
        joint = [t._tablename for t in join
                 if not isinstance(t, Expression)]
        joinon = [t for t in join if isinstance(t, Expression)]
        #patch join+left patch (solves problem with ordering in left joins)
        tables_to_merge={}
        [tables_to_merge.update(
                dict.fromkeys(tables(t))) for t in joinon]
        joinont = [t.first._tablename for t in joinon]
        [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
        # left-joined tables must not receive common filters in the WHERE
        tablenames_for_common_filters = [t for t in tablenames
                                         if not t in joinont ]
        important_tablenames = joint + joinont + tables_to_merge.keys()
        excluded = [t for t in tablenames
                    if not t in important_tablenames ]
    else:
        excluded = tablenames

    if use_common_filters(query):
        query = self.common_filter(query,tablenames_for_common_filters)
    sql_w = ' WHERE ' + self.expand(query) if query else ''

    # assemble the FROM clause depending on which join kinds are present
    if inner_join and not left:
        sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
                               itables_to_merge.keys()])
        for t in ijoinon:
            sql_t += ' %s %s' % (icommand, t)
    elif not inner_join and left:
        sql_t = ', '.join([self.table_alias(t) for t in excluded + \
                               tables_to_merge.keys()])
        if joint:
            sql_t += ' %s %s' % (command,
                                 ','.join([t for t in joint]))
        for t in joinon:
            sql_t += ' %s %s' % (command, t)
    elif inner_join and left:
        all_tables_in_query = set(important_tablenames + \
                                  iimportant_tablenames + \
                                  tablenames)
        tables_in_joinon = set(joinont + ijoinont)
        tables_not_in_joinon = \
            all_tables_in_query.difference(tables_in_joinon)
        sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
        for t in ijoinon:
            sql_t += ' %s %s' % (icommand, t)
        if joint:
            sql_t += ' %s %s' % (command,
                                 ','.join([t for t in joint]))
        for t in joinon:
            sql_t += ' %s %s' % (command, t)
    else:
        sql_t = ', '.join(self.table_alias(t) for t in tablenames)
    if groupby:
        if isinstance(groupby, (list, tuple)):
            groupby = xorify(groupby)
        sql_o += ' GROUP BY %s' % self.expand(groupby)
        if having:
            sql_o += ' HAVING %s' % attributes['having']
    if orderby:
        if isinstance(orderby, (list, tuple)):
            orderby = xorify(orderby)
        if str(orderby) == '<random>':
            sql_o += ' ORDER BY %s' % self.RANDOM()
        else:
            sql_o += ' ORDER BY %s' % self.expand(orderby)
    # deterministic paging: force an ORDER BY on the primary keys when
    # limitby is used without an explicit ordering
    if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
        sql_o += ' ORDER BY %s' % ', '.join(
            [self.db[t].sqlsafe + '.' + self.db[t][x].sqlsafe_name for t in tablenames for x in (
                    hasattr(self.db[t], '_primarykey') and self.db[t]._primarykey
                    or ['_id']
                    )
             ]
            )
    # oracle does not support limitby
    sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
    if for_update and self.can_select_for_update is True:
        sql = sql.rstrip(';') + ' FOR UPDATE;'
    return sql
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Assemble the final SELECT, appending LIMIT/OFFSET when limitby
    (a (min, max) row range) is given."""
    ordering = sql_o
    if limitby:
        offset, upper = limitby
        ordering += ' LIMIT %i OFFSET %i' % (upper - offset, offset)
    return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, ordering)
1839
1840 - def _fetchall(self):
1841 return self.cursor.fetchall()
1842
def _select_aux(self,sql,fields,attributes):
    """Execute (or fetch from cache) a prepared SELECT and parse the rows.

    When a (cache_model, time_expire) pair is supplied, raw rows are
    cached under a key derived from the uri and sql (md5-hashed when too
    long).  limitby's offset is applied via rowslice before parsing.
    """
    args_get = attributes.get
    cache = args_get('cache',None)
    if not cache:
        self.execute(sql)
        rows = self._fetchall()
    else:
        (cache_model, time_expire) = cache
        key = self.uri + '/' + sql + '/rows'
        # cache backends may limit key length
        if len(key)>200: key = hashlib_md5(key).hexdigest()
        def _select_aux2():
            self.execute(sql)
            return self._fetchall()
        rows = cache_model(key,_select_aux2,time_expire)
    if isinstance(rows,tuple):
        rows = list(rows)
    limitby = args_get('limitby', None) or (0,)
    # drop the offset rows for engines that cannot do it server-side
    rows = self.rowslice(rows,limitby[0],None)
    processor = args_get('processor',self.parse)
    cacheable = args_get('cacheable',False)
    return processor(rows,fields,self._colnames,cacheable=cacheable)
def select(self, query, fields, attributes):
    """
    Always returns a Rows object, possibly empty.

    With cache + cacheable the fully parsed Rows object is cached;
    otherwise caching (if any) happens at the raw-row level inside
    _select_aux.
    """
    sql = self._select(query, fields, attributes)
    cache = attributes.get('cache', None)
    if cache and attributes.get('cacheable',False):
        del attributes['cache']
        (cache_model, time_expire) = cache
        key = self.uri + '/' + sql
        # cache backends may limit key length
        if len(key)>200: key = hashlib_md5(key).hexdigest()
        args = (sql,fields,attributes)
        return cache_model(
            key,
            lambda self=self,args=args:self._select_aux(*args),
            time_expire)
    else:
        return self._select_aux(sql,fields,attributes)
1883
1884 - def _count(self, query, distinct=None):
1885 tablenames = self.tables(query) 1886 if query: 1887 if use_common_filters(query): 1888 query = self.common_filter(query, tablenames) 1889 sql_w = ' WHERE ' + self.expand(query) 1890 else: 1891 sql_w = '' 1892 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1893 if distinct: 1894 if isinstance(distinct,(list, tuple)): 1895 distinct = xorify(distinct) 1896 sql_d = self.expand(distinct) 1897 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1898 (sql_d, sql_t, sql_w) 1899 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1900
1901 - def count(self, query, distinct=None):
1902 self.execute(self._count(query, distinct)) 1903 return self.cursor.fetchone()[0]
1904
1905 - def tables(self, *queries):
1906 tables = set() 1907 for query in queries: 1908 if isinstance(query, Field): 1909 tables.add(query.tablename) 1910 elif isinstance(query, (Expression, Query)): 1911 if not query.first is None: 1912 tables = tables.union(self.tables(query.first)) 1913 if not query.second is None: 1914 tables = tables.union(self.tables(query.second)) 1915 return list(tables)
1916
1917 - def commit(self):
1918 if self.connection: 1919 return self.connection.commit()
1920
1921 - def rollback(self):
1922 if self.connection: 1923 return self.connection.rollback()
1924
1925 - def close_connection(self):
1926 if self.connection: 1927 r = self.connection.close() 1928 self.connection = None 1929 return r
1930
1931 - def distributed_transaction_begin(self, key):
1932 return
1933
1934 - def prepare(self, key):
1935 if self.connection: self.connection.prepare()
1936
1937 - def commit_prepared(self, key):
1938 if self.connection: self.connection.commit()
1939
1940 - def rollback_prepared(self, key):
1941 if self.connection: self.connection.rollback()
1942
1943 - def concat_add(self, tablename):
1944 return ', ADD '
1945
1946 - def constraint_name(self, table, fieldname):
1947 return '%s_%s__constraint' % (table,fieldname)
1948
1949 - def create_sequence_and_triggers(self, query, table, **args):
1950 self.execute(query)
1951 1952
1953 - def log_execute(self, *a, **b):
1954 if not self.connection: raise ValueError(a[0]) 1955 if not self.connection: return None 1956 command = a[0] 1957 if hasattr(self,'filter_sql_command'): 1958 command = self.filter_sql_command(command) 1959 if self.db._debug: 1960 LOGGER.debug('SQL: %s' % command) 1961 self.db._lastsql = command 1962 t0 = time.time() 1963 ret = self.cursor.execute(command, *a[1:], **b) 1964 self.db._timings.append((command,time.time()-t0)) 1965 del self.db._timings[:-TIMINGSSIZE] 1966 return ret
1967
1968 - def execute(self, *a, **b):
1969 return self.log_execute(*a, **b)
1970
    def represent(self, obj, fieldtype):
        # Convert a Python value into a SQL literal for the given DAL
        # field type, quoting/escaping through self.adapt as needed.
        field_is_type = fieldtype.startswith
        # callables are evaluated first (e.g. field defaults)
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            value = fieldtype.encoder(obj)
            if fieldtype.type in ('string','text', 'json'):
                return self.adapt(value)
            return value
        if isinstance(obj, (Expression, Field)):
            return str(obj)
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str,obj)
            else:
                obj = map(int,[o for o in obj if o != ''])
        # we don't want to bar_encode json objects
        if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
            obj = bar_encode(obj)
        if obj is None:
            return 'NULL'
        # empty string means NULL except for text-like field types
        if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']:
            return 'NULL'
        # adapter-specific overrides take precedence
        r = self.represent_exceptions(obj, fieldtype)
        if not r is None:
            return r
        if fieldtype == 'boolean':
            # '0'/'F...' strings count as false
            if obj and not str(obj)[:1].upper() in '0F':
                return self.smart_adapt(self.TRUE)
            else:
                return self.smart_adapt(self.FALSE)
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(long(obj))
        if field_is_type('decimal'):
            return str(obj)
        elif field_is_type('reference'): # reference
            # check for tablename first
            referenced = fieldtype[9:].strip()
            if referenced in self.db.tables:
                return str(long(obj))
            # keyed reference: 'reference table.field'
            p = referenced.partition('.')
            if p[2] != '':
                try:
                    ftype = self.db[p[0]][p[2]].type
                    return self.represent(obj, ftype)
                except (ValueError, KeyError):
                    return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(long(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                # truncate to seconds precision
                obj = obj.isoformat(self.T_SEP)[:19]
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            if isinstance(obj, datetime.time):
                # NOTE(review): [:10] keeps a partial fraction for times with
                # microseconds ('HH:MM:SS.f'); [:8] may have been intended — confirm
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'json':
            if not self.native_json:
                if have_serializers:
                    obj = serializers.json(obj)
                elif simplejson:
                    obj = simplejson.dumps(obj)
                else:
                    raise RuntimeError("missing simplejson")
        if not isinstance(obj,bytes):
            obj = bytes(obj)
        # make sure the payload is representable in the db codec
        try:
            obj.decode(self.db_codec)
        except:
            obj = obj.decode('latin1').encode(self.db_codec)
        return self.adapt(obj)
2063
2064 - def represent_exceptions(self, obj, fieldtype):
2065 return None
2066
2067 - def lastrowid(self, table):
2068 return None
2069
2070 - def rowslice(self, rows, minimum=0, maximum=None):
2071 """ 2072 By default this function does nothing; 2073 overload when db does not do slicing. 2074 """ 2075 return rows
2076
    def parse_value(self, value, field_type, blob_decode=True):
        # Convert a raw DB value into the Python type implied by field_type,
        # dispatching through self.parsemap for the regular types.
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            # text-like values are passed through untouched
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            # dispatch on the type-name prefix (e.g. 'decimal(10,2)' -> 'decimal')
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value,field_type)
2098
    def parse_reference(self, value, field_type):
        # Wrap an integer FK in a lazy Reference, unless the type points at a
        # keyed table ('reference table.field'), which is returned as-is.
        referee = field_type[10:].strip()
        if not '.' in referee:
            value = Reference(value)
            value._table, value._record = self.db[referee], None
        return value
2105
2106 - def parse_boolean(self, value, field_type):
2107 return value == self.TRUE or str(value)[:1].lower() == 't'
2108
2109 - def parse_date(self, value, field_type):
2110 if isinstance(value, datetime.datetime): 2111 return value.date() 2112 if not isinstance(value, (datetime.date,datetime.datetime)): 2113 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 2114 value = datetime.date(y, m, d) 2115 return value
2116
2117 - def parse_time(self, value, field_type):
2118 if not isinstance(value, datetime.time): 2119 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 2120 if len(time_items) == 3: 2121 (h, mi, s) = time_items 2122 else: 2123 (h, mi, s) = time_items + [0] 2124 value = datetime.time(h, mi, s) 2125 return value
2126
    def parse_datetime(self, value, field_type):
        # Parse an ISO-ish datetime string (optionally with a trailing
        # +HH:MM / -HH:MM offset) into a naive datetime.
        if not isinstance(value, datetime.datetime):
            value = str(value)
            date_part,time_part,timezone = value[:10],value[11:19],value[19:]
            if '+' in timezone:
                ms,tz = timezone.split('+')
                h,m = tz.split(':')
                dt = datetime.timedelta(seconds=3600*int(h)+60*int(m))
            elif '-' in timezone:
                ms,tz = timezone.split('-')
                h,m = tz.split(':')
                dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m))
            else:
                dt = None
            (y, m, d) = map(int,date_part.split('-'))
            time_parts = time_part and time_part.split(':')[:3] or (0,0,0)
            while len(time_parts)<3: time_parts.append(0)
            time_items = map(int,time_parts)
            (h, mi, s) = time_items
            value = datetime.datetime(y, m, d, h, mi, s)
            if dt:
                # NOTE(review): the offset is *added* for '+' zones — converting
                # to UTC would normally subtract it; confirm intended semantics
                value = value + dt
        return value
2150
2151 - def parse_blob(self, value, field_type):
2152 return base64.b64decode(str(value))
2153
2154 - def parse_decimal(self, value, field_type):
2155 decimals = int(field_type[8:-1].split(',')[-1]) 2156 if self.dbengine in ('sqlite', 'spatialite'): 2157 value = ('%.' + str(decimals) + 'f') % value 2158 if not isinstance(value, decimal.Decimal): 2159 value = decimal.Decimal(str(value)) 2160 return value
2161
2162 - def parse_list_integers(self, value, field_type):
2163 if not isinstance(self, NoSQLAdapter): 2164 value = bar_decode_integer(value) 2165 return value
2166
2167 - def parse_list_references(self, value, field_type):
2168 if not isinstance(self, NoSQLAdapter): 2169 value = bar_decode_integer(value) 2170 return [self.parse_reference(r, field_type[5:]) for r in value]
2171
2172 - def parse_list_strings(self, value, field_type):
2173 if not isinstance(self, NoSQLAdapter): 2174 value = bar_decode_string(value) 2175 return value
2176
    def parse_id(self, value, field_type):
        # id columns are always integers; long() also accepts digit strings
        return long(value)
2179
    def parse_integer(self, value, field_type):
        # also used for 'bigint'; long() avoids overflow on 32-bit Python 2
        return long(value)
2182
2183 - def parse_double(self, value, field_type):
2184 return float(value)
2185
    def parse_json(self, value, field_type):
        # When the backend lacks a native JSON type the column holds the
        # serialized text and must be decoded here.
        if not self.native_json:
            if not isinstance(value, basestring):
                raise RuntimeError('json data not a string')
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            if have_serializers:
                value = serializers.loads_json(value)
            elif simplejson:
                value = simplejson.loads(value)
            else:
                raise RuntimeError("missing simplejson")
        return value
2199
2200 - def build_parsemap(self):
2201 self.parsemap = { 2202 'id':self.parse_id, 2203 'integer':self.parse_integer, 2204 'bigint':self.parse_integer, 2205 'float':self.parse_double, 2206 'double':self.parse_double, 2207 'reference':self.parse_reference, 2208 'boolean':self.parse_boolean, 2209 'date':self.parse_date, 2210 'time':self.parse_time, 2211 'datetime':self.parse_datetime, 2212 'blob':self.parse_blob, 2213 'decimal':self.parse_decimal, 2214 'json':self.parse_json, 2215 'list:integer':self.parse_list_integers, 2216 'list:reference':self.parse_list_references, 2217 'list:string':self.parse_list_strings, 2218 }
2219
    def parse(self, rows, fields, colnames, blob_decode=True,
              cacheable = False):
        # Convert raw DB rows into a Rows object of Row records keyed by
        # table/field name, attaching update/delete helpers, referee sets
        # and virtual fields along the way.
        db = self.db
        virtualtables = []
        new_rows = []
        tmps = []
        # pre-resolve each column name into (tablename, fieldname, table,
        # field, field type); None marks computed/extra columns
        for colname in colnames:
            col_m = self.REGEX_TABLE_DOT_FIELD.match(colname)
            if not col_m:
                tmps.append(None)
            else:
                tablename, fieldname = col_m.groups()
                table = db[tablename]
                field = table[fieldname]
                ft = field.type
                tmps.append((tablename, fieldname, table, field, ft))
        for (i,row) in enumerate(rows):
            new_row = Row()
            for (j,colname) in enumerate(colnames):
                value = row[j]
                tmp = tmps[j]
                if tmp:
                    (tablename,fieldname,table,field,ft) = tmp
                    colset = new_row.get(tablename, None)
                    if colset is None:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    value = self.parse_value(value,ft,blob_decode)
                    if field.filter_out:
                        value = field.filter_out(value)
                    colset[fieldname] = value

                    # for backward compatibility
                    if ft=='id' and fieldname!='id' and \
                            not 'id' in table.fields:
                        colset['id'] = value

                    if ft == 'id' and not cacheable:
                        # temporary hack to deal with
                        # GoogleDatastoreAdapter
                        # references
                        if isinstance(self, GoogleDatastoreAdapter):
                            id = value.key.id() if self.use_ndb else value.key().id_or_name()
                            colset[fieldname] = id
                            colset.gae_item = value
                        else:
                            id = value
                        # attach per-record mutation helpers
                        colset.update_record = RecordUpdater(colset,table,id)
                        colset.delete_record = RecordDeleter(table,id)
                        if table._db._lazy_tables:
                            colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id)
                        # expose reverse references as lazy Sets
                        for rfield in table._referenced_by:
                            referee_link = db._referee_name and \
                                db._referee_name % dict(
                                table=rfield.tablename,field=rfield.name)
                            if referee_link and not referee_link in colset:
                                colset[referee_link] = LazySet(rfield,id)
                else:
                    # computed columns land in the '_extra' sub-row
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colname] = \
                        self.parse_value(value,
                                         fields[j].type,blob_decode)
                    new_column_name = \
                        REGEX_SELECT_AS_PARSER.search(colname)
                    if not new_column_name is None:
                        # also expose '... AS alias' columns as attributes
                        column_name = new_column_name.groups(0)
                        setattr(new_row,column_name[0],value)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

        # attach new-style virtual/lazy fields for every table seen above
        for tablename in virtualtables:
            table = db[tablename]
            fields_virtual = [(f,v) for (f,v) in table.iteritems()
                              if isinstance(v,FieldVirtual)]
            fields_lazy = [(f,v) for (f,v) in table.iteritems()
                           if isinstance(v,FieldMethod)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f,v in fields_virtual:
                        try:
                            box[f] = v.f(row)
                        except AttributeError:
                            pass # not enough fields to define virtual field
                    for f,v in fields_lazy:
                        try:
                            box[f] = (v.handler or VirtualCommand)(v.f,row)
                        except AttributeError:
                            pass # not enough fields to define virtual field

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except (KeyError, AttributeError):
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
2321
    def common_filter(self, query, tablenames):
        # Combine `query` with the per-table common filters and the
        # multi-tenant restriction for every table involved.
        tenant_fieldname = self.db._request_tenant

        for tablename in tablenames:
            table = self.db[tablename]

            # deal with user provided filters
            if table._common_filter != None:
                query = query & table._common_filter(query)

            # deal with multi_tenant filters
            if tenant_fieldname in table:
                default = table[tenant_fieldname].default
                if not default is None:
                    newquery = table[tenant_fieldname] == default
                    if query is None:
                        query = newquery
                    else:
                        query = query & newquery
        return query
2342
    def CASE(self,query,t,f):
        # Build a 'CASE WHEN <query> THEN <t> ELSE <f> END' Expression;
        # t and f may be plain Python literals or Expressions.
        def represent(x):
            # infer a field type for plain literals so they are quoted right
            types = {type(True):'boolean',type(0):'integer',type(1.0):'double'}
            if x is None: return 'NULL'
            elif isinstance(x,Expression): return str(x)
            else: return self.represent(x,types.get(type(x),'string'))
        return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \
                              (self.expand(query),represent(t),represent(f)))
2352 - def sqlsafe_table(self, tablename, ot=None):
2353 if ot is not None: 2354 return ('%s AS ' + self.QUOTE_TEMPLATE) % (ot, tablename) 2355 return self.QUOTE_TEMPLATE % tablename
2356
2357 - def sqlsafe_field(self, fieldname):
2358 return self.QUOTE_TEMPLATE % fieldname
2359
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite databases (sqlite2/sqlite3 drivers)."""
    drivers = ('sqlite2','sqlite3')

    can_select_for_update = None # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self,field,what):
        # SQLite has no EXTRACT(); delegate to the registered UDF
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """UDF: pull a date/time component out of an ISO string `s`.

        `lookup` is one of year/month/day/hour/minute/second, or 'epoch'
        for the POSIX timestamp. Returns None on malformed input.
        """
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except Exception:
            # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        """UDF backing SQLite's REGEXP operator."""
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.adapter_args = adapter_args
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                if PYTHON_VERSION[0] == 2:
                    self.dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
                else:
                    self.dbpath = pjoin(self.folder, self.dbpath)
        if not 'check_same_thread' in driver_args:
            # connections are pooled across threads by the DAL
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the UDFs used by EXTRACT()/REGEXP
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

        if self.adapter_args.get('foreign_keys',True):
            self.execute('PRAGMA foreign_keys=ON;')

    def _truncate(self, table, mode=''):
        # also reset the AUTOINCREMENT counter kept in sqlite_sequence
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
class SpatiaLiteAdapter(SQLiteAdapter):
    """SQLite adapter with the SpatiaLite GIS extension loaded."""
    drivers = ('sqlite3','sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        # srid: default spatial reference id used for geometry columns
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # load the platform-specific SpatiaLite shared library, then
        # register the same UDFs the plain SQLite adapter uses
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        # second is a dict carrying 'precision' and 'options'
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
            second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # geometry values are embedded via ST_GeomFromText; everything else
        # falls back to the base adapter
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
#            if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
#            elif field_is_type('geography'):
#                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#            else:
#                raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2567
class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite adapter for Jython, connecting through zxJDBC."""
    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        def connector(dbpath=self.dbpath,driver_args=driver_args):
            # zxJDBC wraps a JDBC connection obtained via a jdbc: URL
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:'+dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        # zxJDBC accepts a single statement argument (no *args/**kwargs)
        return self.log_execute(a)
2607
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL (MySQLdb, pymysql or mysqlconnector drivers)."""
    drivers = ('MySQLdb','pymysql', 'mysqlconnector')

    commit_on_alter_table = True
    support_distributed_transaction = True
    # mapping of DAL field types onto MySQL column types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    QUOTE_TEMPLATE = "`%s`"

    def varquote(self,name):
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        table_rname = table.sqlsafe
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table_rname,
                'SET FOREIGN_KEY_CHECKS=1;']

    def _insert_empty(self, table):
        return 'INSERT INTO %s VALUES (DEFAULT);' % (table.sqlsafe)

    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,key):
        # fixed: parameter was misspelled `ley`, diverging from the
        # BaseAdapter signature and breaking keyword invocation
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        # NOTE(review): user/password pass through credential_decoder twice
        # (here and above) — confirm decoders are expected to be idempotent
        driver_args.update(db=db,
                           user=credential_decoder(user),
                           passwd=credential_decoder(password),
                           host=host,
                           port=port,
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
2737
class PostgreSQLAdapter(BaseAdapter):
    """Adapter for PostgreSQL (psycopg2 or pg8000 drivers)."""
    drivers = ('psycopg2','pg8000')

    QUOTE_TEMPLATE = '"%s"'

    support_distributed_transaction = True
    # mapping of DAL field types onto PostgreSQL column types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',

        }

2776 - def varquote(self,name):
2777 return varquote_aux(name,'"%s"')
2778
2779 - def adapt(self,obj):
2780 if self.driver_name == 'psycopg2': 2781 return psycopg2_adapt(obj).getquoted() 2782 elif self.driver_name == 'pg8000': 2783 return "'%s'" % str(obj).replace("%","%%").replace("'","''") 2784 else: 2785 return "'%s'" % str(obj).replace("'","''")
2786
2787 - def sequence_name(self,table):
2788 return self.QUOTE_TEMPLATE % (table + '_id_seq')
2789
2790 - def RANDOM(self):
2791 return 'RANDOM()'
2792
2793 - def ADD(self, first, second):
2794 t = first.type 2795 if t in ('text','string','password', 'json', 'upload','blob'): 2796 return '(%s || %s)' % (self.expand(first), self.expand(second, t)) 2797 else: 2798 return '(%s + %s)' % (self.expand(first), self.expand(second, t))
2799
2800 - def distributed_transaction_begin(self,key):
2801 return
2802
2803 - def prepare(self,key):
2804 self.execute("PREPARE TRANSACTION '%s';" % key)
2805
2806 - def commit_prepared(self,key):
2807 self.execute("COMMIT PREPARED '%s';" % key)
2808
2809 - def rollback_prepared(self,key):
2810 self.execute("ROLLBACK PREPARED '%s';" % key)
2811
    def create_sequence_and_triggers(self, query, table, **args):
        # SERIAL/BIGSERIAL columns create their own sequence, so only the
        # CREATE TABLE statement needs to run.
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #              % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    # matches user:password@host:port/dbname?sslmode=...
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        # srid: spatial reference id used for PostGIS geometry columns
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # parse the connection URI into its components
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        # build the libpq-style DSN string
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        # record driver name/version chosen from the uri
        if self.driver:
            self.__version__ = "%s %s" % (self.driver.__name__,
                                          self.driver.__version__)
        else:
            self.__version__ = None
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
2871
def after_connection(self):
    """Per-connection setup: force UTF8 client encoding, enable
    standard-conforming string literals, and probe JSON support."""
    self.connection.set_client_encoding('UTF8')
    self.execute("SET standard_conforming_strings=on;")
    self.try_json()
2876
def lastrowid(self, table=None):
    """Return the value most recently produced by any sequence in this
    session (Postgres lastval()); `table` is accepted for API symmetry."""
    self.execute("select lastval()")
    row = self.cursor.fetchone()
    return int(row[0])
2880
def try_json(self):
    """Probe server/driver versions for native JSON column support and,
    when available, map the 'json' field type to the JSON SQL type.
    Called from after_connection()."""
    driver = self.driver_name
    if driver == "pg8000":
        # pg8000 reports the server version as a string
        supports_json = self.connection.server_version >= "9.2.0"
    elif driver == "psycopg2" and self.driver.__version__ >= "2.0.12":
        # psycopg2 reports it as an integer, e.g. 90200
        supports_json = self.connection.server_version >= 90200
    elif driver == "zxJDBC":
        supports_json = self.connection.dbversion >= "9.2.0"
    else:
        supports_json = None
    if supports_json:
        self.types["json"] = "JSON"
        self.native_json = True
    else:
        LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")
2896
def LIKE(self, first, second):
    """Case-sensitive LIKE; non-text columns are CAST to CHAR first."""
    expr = self.expand(first)
    pattern = self.expand(second, 'string')
    if first.type in ('string', 'text', 'json'):
        return '(%s LIKE %s)' % (expr, pattern)
    return '(%s LIKE %s)' % (
        self.CAST(expr, 'CHAR(%s)' % first.length), pattern)

def ILIKE(self, first, second):
    """Case-insensitive LIKE (Postgres ILIKE).

    BUGFIX: the CAST branch previously emitted LIKE instead of ILIKE,
    silently losing case-insensitivity on non-text columns.
    """
    expr = self.expand(first)
    pattern = self.expand(second, 'string')
    if first.type in ('string', 'text', 'json'):
        return '(%s ILIKE %s)' % (expr, pattern)
    return '(%s ILIKE %s)' % (
        self.CAST(expr, 'CHAR(%s)' % first.length), pattern)

def REGEXP(self, first, second):
    """Postgres regular-expression match operator (~)."""
    return '(%s ~ %s)' % (self.expand(first),
                          self.expand(second, 'string'))

def STARTSWITH(self, first, second):
    """Case-insensitive prefix match: appends %% to the needle."""
    return '(%s ILIKE %s)' % (self.expand(first),
                              self.expand(second + '%', 'string'))

def ENDSWITH(self, first, second):
    """Case-insensitive suffix match: prepends %% to the needle."""
    return '(%s ILIKE %s)' % (self.expand(first),
                              self.expand('%' + second, 'string'))
2924 2925 # GIS functions 2926
# --- PostGIS spatial helpers: each renders one spatial SQL call. ---

def ST_ASGEOJSON(self, first, second):
    """http://postgis.org/docs/ST_AsGeoJSON.html
    `second` carries 'version', 'precision' and 'options' keys."""
    return 'ST_AsGeoJSON(%s,%s,%s,%s)' % (second['version'],
                                          self.expand(first),
                                          second['precision'],
                                          second['options'])

def ST_ASTEXT(self, first):
    """http://postgis.org/docs/ST_AsText.html"""
    return 'ST_AsText(%s)' % self.expand(first)

def ST_X(self, first):
    """http://postgis.org/docs/ST_X.html"""
    return 'ST_X(%s)' % self.expand(first)

def ST_Y(self, first):
    """http://postgis.org/docs/ST_Y.html"""
    return 'ST_Y(%s)' % self.expand(first)

def ST_CONTAINS(self, first, second):
    """http://postgis.org/docs/ST_Contains.html"""
    return 'ST_Contains(%s,%s)' % (self.expand(first),
                                   self.expand(second, first.type))

def ST_DISTANCE(self, first, second):
    """http://postgis.org/docs/ST_Distance.html"""
    return 'ST_Distance(%s,%s)' % (self.expand(first),
                                   self.expand(second, first.type))

def ST_EQUALS(self, first, second):
    """http://postgis.org/docs/ST_Equals.html"""
    return 'ST_Equals(%s,%s)' % (self.expand(first),
                                 self.expand(second, first.type))

def ST_INTERSECTS(self, first, second):
    """http://postgis.org/docs/ST_Intersects.html"""
    return 'ST_Intersects(%s,%s)' % (self.expand(first),
                                     self.expand(second, first.type))

def ST_OVERLAPS(self, first, second):
    """http://postgis.org/docs/ST_Overlaps.html"""
    return 'ST_Overlaps(%s,%s)' % (self.expand(first),
                                   self.expand(second, first.type))

def ST_SIMPLIFY(self, first, second):
    """http://postgis.org/docs/ST_Simplify.html
    `second` is the tolerance, expanded as a double."""
    return 'ST_Simplify(%s,%s)' % (self.expand(first),
                                   self.expand(second, 'double'))

def ST_TOUCHES(self, first, second):
    """http://postgis.org/docs/ST_Touches.html"""
    return 'ST_Touches(%s,%s)' % (self.expand(first),
                                  self.expand(second, first.type))

def ST_WITHIN(self, first, second):
    """http://postgis.org/docs/ST_Within.html"""
    return 'ST_Within(%s,%s)' % (self.expand(first),
                                 self.expand(second, first.type))
def ST_DWITHIN(self, first, second):
    """http://postgis.org/docs/ST_DWithin.html

    FIX: the original declared a Python-2-only tuple parameter
    `(second, third)` (removed by PEP 3113, a SyntaxError on Python 3).
    Callers still pass a single (geometry, distance) pair; it is now
    unpacked inside the body, so the call interface is unchanged.
    """
    geom, distance = second
    return 'ST_DWithin(%s,%s,%s)' % (self.expand(first),
                                     self.expand(geom, first.type),
                                     self.expand(distance, 'double'))
3007
def represent(self, obj, fieldtype):
    """Render a literal for SQL; geo* types become PostGIS constructors,
    everything else is delegated to BaseAdapter.represent.

    Geo field types look like 'geometry(schema,srid,dim)'; when fewer
    than two parameters are present the default SRID 4326 is used.
    """
    if fieldtype.startswith('geo'):
        srid = 4326  # postGIS default srid for geometry
        geotype, parms = fieldtype[:-1].split('(')
        parms = parms.split(',')
        if len(parms) >= 2:
            schema, srid = parms[:2]
        if fieldtype.startswith('geometry'):
            value = "ST_GeomFromText('%s',%s)" % (obj, srid)
        elif fieldtype.startswith('geography'):
            value = "ST_GeogFromText('SRID=%s;%s')" % (srid, obj)
        # NOTE(review): any other 'geo*' type leaves `value` unbound,
        # matching the original behavior (a commented-out SyntaxError).
        return value
    return BaseAdapter.represent(self, obj, fieldtype)
3024
def _drop(self, table, mode='restrict'):
    """Return the DROP TABLE statement list; mode must be 'restrict',
    'cascade' or '' (Postgres drop behaviors)."""
    if mode not in ('restrict', 'cascade', ''):
        raise ValueError('Invalid mode: %s' % mode)
    return ['DROP TABLE ' + table.sqlsafe + ' ' + str(mode) + ';']
3029
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter variant that stores list: field types in
    native Postgres array columns (BIGINT[]/TEXT[]) instead of text."""
    drivers = ('psycopg2', 'pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        # native array columns distinguish this adapter from the parent
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
    }
def parse_list_integers(self, value, field_type):
    """Native integer arrays are already decoded by the driver."""
    return value

def parse_list_references(self, value, field_type):
    """Wrap each array element as a reference to the target table
    (field_type looks like 'list:reference <table>')."""
    target = field_type[5:]
    return [self.parse_reference(item, target) for item in value]

def parse_list_strings(self, value, field_type):
    """Native string arrays are already decoded by the driver."""
    return value
3068
def represent(self, obj, fieldtype):
    """Render list: values as Postgres ARRAY[...] literals; everything
    else is delegated to BaseAdapter.represent."""
    if fieldtype.startswith('list:'):
        if not obj:
            items = []
        elif not isinstance(obj, (list, tuple)):
            items = [obj]
        else:
            items = obj
        # coerce each element, then use repr() so strings get quoted
        if fieldtype.startswith('list:string'):
            items = map(str, items)
        else:
            items = map(int, items)
        return 'ARRAY[%s]' % ','.join(repr(item) for item in items)
    return BaseAdapter.represent(self, obj, fieldtype)
3082
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL over zxJDBC (Jython); URI has no sslmode parameter."""
    drivers = ('zxJDBC',)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Parse the URI and prepare a zxJDBC connector; the JDBC driver
    takes (url, user, password) positionally rather than a DSN string."""
    self.db = db
    self.dbengine = "postgres"
    self.uri = uri
    if do_connect:
        self.find_driver(adapter_args, uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.find_or_make_work_folder()
    ruri = uri.split('://', 1)[1]
    match = self.REGEX_URI.match(ruri)
    if not match:
        raise SyntaxError("Invalid URI string in DAL")
    user = credential_decoder(match.group('user'))
    if not user:
        raise SyntaxError('User required')
    password = credential_decoder(match.group('password')) or ''
    host = match.group('host')
    if not host:
        raise SyntaxError('Host name required')
    db = match.group('db')
    if not db:
        raise SyntaxError('Database name required')
    port = match.group('port') or '5432'
    msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)

    def connector(msg=msg, driver_args=driver_args):
        return self.driver.connect(*msg, **driver_args)

    self.connector = connector
    if do_connect:
        self.reconnect()
3123
def after_connection(self):
    """Per-connection setup for the JDBC driver: UTF8 client encoding,
    an explicit BEGIN, unicode client encoding, then JSON probing."""
    self.connection.set_client_encoding('UTF8')
    self.execute('BEGIN;')
    self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
    self.try_json()
3129
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via cx_Oracle; ids are driven by explicit
    sequences plus a BEFORE INSERT trigger (no native autoincrement)."""
    drivers = ('cx_Oracle',)

    commit_on_alter_table = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
    }
def trigger_name(self, tablename):
    """Name of the per-table BEFORE INSERT trigger."""
    return '%s_trigger' % tablename

def LEFT_JOIN(self):
    """Oracle spells LEFT JOIN as LEFT OUTER JOIN."""
    return 'LEFT OUTER JOIN'

def RANDOM(self):
    """Random ordering expression for Oracle."""
    return 'dbms_random.value'

def NOT_NULL(self, default, field_type):
    """DEFAULT <value> NOT NULL clause (Oracle puts DEFAULT first)."""
    return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)
3174
def _drop(self, table, mode):
    """Drop the table and its companion id sequence."""
    sequence_name = table._sequence_name
    return ['DROP TABLE %s %s;' % (table.sqlsafe, mode),
            'DROP SEQUENCE %s;' % sequence_name]

def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Oracle has no LIMIT/OFFSET; emulate it with a nested ROWNUM
    subquery (outer filter w_row > lmin, inner filter ROWNUM <= lmax)."""
    if limitby:
        (lmin, lmax) = limitby
        if len(sql_w) > 1:
            sql_w_row = sql_w + ' AND w_row > %i' % lmin
        else:
            sql_w_row = 'WHERE w_row > %i' % lmin
        return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
    return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3188
def constraint_name(self, tablename, fieldname):
    """Oracle identifiers are capped at 30 chars; shorten the generic
    constraint name when it would exceed that limit."""
    name = BaseAdapter.constraint_name(self, tablename, fieldname)
    if len(name) > 30:
        name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
    return name
3194
def represent_exceptions(self, obj, fieldtype):
    """Oracle-specific literal rendering for types the generic path
    cannot handle; returns None to fall back to the default."""
    if fieldtype == 'blob':
        # CLOB placeholder is later rewritten into a bind variable
        # by execute()
        obj = base64.b64encode(str(obj))
        return ":CLOB('%s')" % obj
    elif fieldtype == 'date':
        if isinstance(obj, (datetime.date, datetime.datetime)):
            obj = obj.isoformat()[:10]
        else:
            obj = str(obj)
        return "to_date('%s','yyyy-mm-dd')" % obj
    elif fieldtype == 'datetime':
        if isinstance(obj, datetime.datetime):
            obj = obj.isoformat()[:19].replace('T', ' ')
        elif isinstance(obj, datetime.date):
            obj = obj.isoformat()[:10] + ' 00:00:00'
        else:
            obj = str(obj)
        return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
    return None
3214
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Prepare a cx_Oracle connector for an oracle:// URI.

    BUGFIX: the original wrote ``driver_args['threaded'] = True``
    directly into the mutable default argument, so the flag leaked into
    every later instantiation and into any dict passed by the caller.
    The dict is now copied before being modified; the signature (and
    its default) is unchanged.
    """
    self.db = db
    self.dbengine = "oracle"
    self.uri = uri
    if do_connect:
        self.find_driver(adapter_args, uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.find_or_make_work_folder()
    ruri = uri.split('://', 1)[1]
    # copy so neither the default dict nor the caller's dict is mutated
    driver_args = dict(driver_args)
    if 'threaded' not in driver_args:
        driver_args['threaded'] = True

    def connector(uri=ruri, driver_args=driver_args):
        return self.driver.connect(uri, **driver_args)

    self.connector = connector
    if do_connect:
        self.reconnect()
3234
def after_connection(self):
    """Pin the session date formats so literal to_date-free comparisons
    behave predictably."""
    self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
    self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

# Matches the next :CLOB('...') placeholder outside of quoted strings;
# used by execute() to rewrite CLOB literals into bind variables.
oracle_fix = re.compile(r"[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")
def execute(self, command, args=None):
    """Execute a statement, first rewriting every :CLOB('...') literal
    into a numbered bind variable (:1, :2, ...) whose value is appended
    to `args`, and stripping any trailing semicolon (Oracle rejects it).
    """
    args = args or []
    bind_index = 1
    while True:
        match = self.oracle_fix.match(command)
        if not match:
            break
        # replace the placeholder with :<n> and collect its payload,
        # un-doubling the escaped single quotes
        command = (command[:match.start('clob')] + str(bind_index) +
                   command[match.end('clob'):])
        args.append(match.group('clob')[6:-2].replace("''", "'"))
        bind_index += 1
    if command[-1:] == ';':
        command = command[:-1]
    return self.log_execute(command, args)
3254
def create_sequence_and_triggers(self, query, table, **args):
    """Run the CREATE TABLE statement, then create the id sequence and
    a BEFORE INSERT trigger that fills/resyncs the id from it (Oracle
    has no native autoincrement)."""
    tablename = table._tablename
    id_name = table._id.name
    sequence_name = table._sequence_name
    trigger_name = table._trigger_name
    self.execute(query)
    self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
    # When an explicit id is inserted, the trigger nudges the sequence
    # forward so subsequent auto-ids do not collide.
    self.execute("""
        CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
        DECLARE
            curr_val NUMBER;
            diff_val NUMBER;
            PRAGMA autonomous_transaction;
        BEGIN
            IF :NEW.%(id)s IS NOT NULL THEN
                EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                diff_val := :NEW.%(id)s - curr_val - 1;
                IF diff_val != 0 THEN
                  EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
                  EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                  EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
                END IF;
            END IF;
            SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
        END;
    """ % dict(trigger_name=trigger_name, tablename=tablename,
               sequence_name=sequence_name, id=id_name))
3282
def lastrowid(self, table):
    """Read the current value of the table's id sequence (the value
    just consumed by the insert trigger)."""
    sequence_name = table._sequence_name
    self.execute('SELECT %s.currval FROM dual;' % sequence_name)
    return long(self.cursor.fetchone()[0])

# def parse_value(self, value, field_type, blob_decode=True):
#     if blob_decode and isinstance(value, cx_Oracle.LOB):
#         try:
#             value = value.read()
#         except self.driver.ProgrammingError:
#             # After a subsequent fetch the LOB value is not valid anymore
#             pass
#     return BaseAdapter.parse_value(self, value, field_type, blob_decode)
def _fetchall(self):
    """Fetch all rows; when the result set contains CLOB columns, read
    each LOB eagerly (LOBs become invalid after subsequent fetches)."""
    has_clob = any(col[1] == cx_Oracle.CLOB
                   for col in self.cursor.description)
    if not has_clob:
        return self.cursor.fetchall()
    return [tuple([(cell.read() if type(cell) == cx_Oracle.LOB else cell)
                   for cell in row]) for row in self.cursor]
3303
def sqlsafe_table(self, tablename, ot=None):
    """Quote a table name; with `ot` (original table) emit the quoted
    alias pair '"<ot>" "<tablename>"'."""
    if ot is None:
        return self.QUOTE_TEMPLATE % tablename
    return (self.QUOTE_TEMPLATE + ' ' + self.QUOTE_TEMPLATE) % (ot, tablename)
3309
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server via pyodbc."""
    drivers = ('pyodbc',)
    T_SEP = 'T'

    QUOTE_TEMPLATE = '"%s"'

    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
    }
def concat_add(self, tablename):
    """ALTER TABLE ... ADD prefix used when concatenating migrations."""
    return '; ALTER TABLE %s ADD ' % tablename

def varquote(self, name):
    """Quote an identifier T-SQL style with square brackets."""
    return varquote_aux(name, '[%s]')

def EXTRACT(self, field, what):
    """T-SQL date-part extraction."""
    return "DATEPART(%s,%s)" % (what, self.expand(field))

def LEFT_JOIN(self):
    return 'LEFT OUTER JOIN'

def RANDOM(self):
    """Random ordering expression for MSSQL."""
    return 'NEWID()'

def ALLOW_NULL(self):
    """MSSQL wants an explicit NULL on nullable columns."""
    return ' NULL'

def CAST(self, first, second):
    # apparently no cast necessary in MSSQL
    return first

def SUBSTRING(self, field, parameters):
    return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                    parameters[0], parameters[1])

def PRIMARY_KEY(self, key):
    return 'PRIMARY KEY CLUSTERED (%s)' % key

def AGGREGATE(self, first, what):
    """Map the generic LENGTH aggregate to T-SQL's LEN."""
    if what == 'LENGTH':
        what = 'LEN'
    return "%s(%s)" % (what, self.expand(first))
3377 3378
3379 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3380 if limitby: 3381 (lmin, lmax) = limitby 3382 sql_s += ' TOP %i' % lmax 3383 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
3384 3385 TRUE = 1 3386 FALSE = 0 3387 3388 REGEX_DSN = re.compile('^(?P<dsn>.+)$') 3389 REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$') 3390 REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') 3391
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, srid=4326,
             after_connection=None):
    """Build a pyodbc connection string from an mssql:// URI.

    Two URI shapes are supported: a bare DSN (no '@') or a full
    user:password@host:port/db?arg=value&... URI.

    FIX: ``argsdict.iteritems()`` was Python-2-only; ``items()``
    behaves identically on Python 2 and keeps the code portable.
    """
    self.db = db
    self.dbengine = "mssql"
    self.uri = uri
    if do_connect:
        self.find_driver(adapter_args, uri)
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.srid = srid
    self.find_or_make_work_folder()
    # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
    ruri = uri.split('://', 1)[1]
    if '@' not in ruri:
        try:
            m = self.REGEX_DSN.match(ruri)
            if not m:
                raise SyntaxError(
                    'Parsing uri string(%s) has no result' % self.uri)
            dsn = m.group('dsn')
            if not dsn:
                raise SyntaxError('DSN required')
        except SyntaxError:
            e = sys.exc_info()[1]
            LOGGER.error('NdGpatch error')
            raise e
        # was cnxn = 'DSN=%s' % dsn
        cnxn = dsn
    else:
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '1433'
        # Parse the optional url name-value arg pairs after the '?'
        # (in the form of arg1=value1&arg2=value2&...)
        # Default values (drivers like FreeTDS insist on uppercase
        # parameter keys)
        argsdict = {'DRIVER': '{SQL Server}'}
        urlargs = m.group('urlargs') or ''
        for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
            argsdict[str(argmatch.group('argkey')).upper()] = \
                argmatch.group('argvalue')
        urlargs = ';'.join(['%s=%s' % (ak, av)
                            for (ak, av) in argsdict.items()])
        cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
            % (host, port, db, user, password, urlargs)

    def connector(cnxn=cnxn, driver_args=driver_args):
        return self.driver.connect(cnxn, **driver_args)

    self.connector = connector
    if do_connect:
        self.reconnect()
3455
def lastrowid(self, table):
    """Last identity generated in the current scope.
    SCOPE_IDENTITY() is preferred over @@IDENTITY because it ignores
    identities created by triggers."""
    self.execute('SELECT SCOPE_IDENTITY();')
    return long(self.cursor.fetchone()[0])

def rowslice(self, rows, minimum=0, maximum=None):
    """Client-side offset emulation for TOP-based pagination."""
    return rows[minimum:] if maximum is None else rows[minimum:maximum]

def EPOCH(self, first):
    """Seconds since the Unix epoch, via DATEDIFF."""
    return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

def CONCAT(self, *items):
    """T-SQL string concatenation uses the + operator."""
    return '(%s)' % ' + '.join(self.expand(item, 'string')
                               for item in items)
3471 3472 # GIS Spatial Extensions 3473 3474 # No STAsGeoJSON in MSSQL 3475
# --- MSSQL spatial extensions (method-call syntax, predicates =1). ---
# No STAsGeoJSON in MSSQL.

def ST_ASTEXT(self, first):
    return '%s.STAsText()' % self.expand(first)

def ST_CONTAINS(self, first, second):
    return '%s.STContains(%s)=1' % (self.expand(first),
                                    self.expand(second, first.type))

def ST_DISTANCE(self, first, second):
    return '%s.STDistance(%s)' % (self.expand(first),
                                  self.expand(second, first.type))

def ST_EQUALS(self, first, second):
    return '%s.STEquals(%s)=1' % (self.expand(first),
                                  self.expand(second, first.type))

def ST_INTERSECTS(self, first, second):
    return '%s.STIntersects(%s)=1' % (self.expand(first),
                                      self.expand(second, first.type))

def ST_OVERLAPS(self, first, second):
    return '%s.STOverlaps(%s)=1' % (self.expand(first),
                                    self.expand(second, first.type))

# no STSimplify in MSSQL

def ST_TOUCHES(self, first, second):
    return '%s.STTouches(%s)=1' % (self.expand(first),
                                   self.expand(second, first.type))

def ST_WITHIN(self, first, second):
    return '%s.STWithin(%s)=1' % (self.expand(first),
                                  self.expand(second, first.type))
3501
def represent(self, obj, fieldtype):
    """Render geo field literals as MSSQL STGeomFromText constructors;
    other types delegate to BaseAdapter.represent.

    BUGFIX: the original compared ``fieldtype == 'geography'`` exactly
    and then did ``fieldtype[:-1].split('(')`` with tuple unpacking,
    which raised ValueError for plain 'geography' (no '(') and for
    plain 'geometry'; the trailing duplicate return also referenced an
    unbound ``srid``. ``partition`` parses both the bare and the
    parameterized form safely, and 'geography(...)' now reaches the
    geography branch as the dead code clearly intended.
    """
    field_is_type = fieldtype.startswith
    if field_is_type('geometry'):
        srid = 0  # MS SQL default srid for geometry
        geotype, _, parms = fieldtype[:-1].partition('(')
        if parms:
            srid = parms
        return "geometry::STGeomFromText('%s',%s)" % (obj, srid)
    elif field_is_type('geography'):
        srid = 4326  # MS SQL default srid for geography
        geotype, _, parms = fieldtype[:-1].partition('(')
        if parms:
            srid = parms
        return "geography::STGeomFromText('%s',%s)" % (obj, srid)
    return BaseAdapter.represent(self, obj, fieldtype)
3520
class MSSQL3Adapter(MSSQLAdapter):
    """ experimental support for pagination in MSSQL"""
3524 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
3525 if limitby: 3526 (lmin, lmax) = limitby 3527 if lmin == 0: 3528 sql_s += ' TOP %i' % lmax 3529 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) 3530 lmin += 1 3531 sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:] 3532 sql_g_inner = sql_o[:sql_o.find('ORDER BY ')] 3533 sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))] 3534 sql_f_inner = [f for f in sql_f.split(',')] 3535 sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)] 3536 sql_f_iproxy = ', '.join(sql_f_iproxy) 3537 sql_f_oproxy = ', '.join(sql_f_outer) 3538 return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax) 3539 return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
3540 - def rowslice(self,rows,minimum=0,maximum=None):
3541 return rows
3542
class MSSQL4Adapter(MSSQLAdapter):
    """ support for true pagination in MSSQL >= 2012"""
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Use OFFSET ... FETCH NEXT (SQL Server 2012+) for real offsets;
    plain TOP when the offset is zero (slightly faster, and web2py's
    default reference fetch has no ORDER BY)."""
    if limitby:
        (lmin, lmax) = limitby
        if lmin == 0:
            sql_s += ' TOP %i' % lmax
        else:
            if not sql_o:
                # OFFSET/FETCH requires an ORDER BY; with none given the
                # developer chose their own poison, so order randomly
                sql_o += ' ORDER BY %s' % self.RANDOM()
            sql_o += ' OFFSET %i ROWS FETCH NEXT %i ROWS ONLY' % (lmin, lmax - lmin)
    return 'SELECT %s %s FROM %s%s%s;' % \
        (sql_s, sql_f, sql_t, sql_w, sql_o)

def rowslice(self, rows, minimum=0, maximum=None):
    """Pagination happens server-side here; no client-side slicing."""
    return rows
3565
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter using unicode column types (NVARCHAR/NTEXT) and
    N-prefixed string literals."""
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
    }
def represent(self, obj, fieldtype):
    """Delegate to BaseAdapter, then prefix quoted text literals with N
    so MSSQL treats them as unicode."""
    value = BaseAdapter.represent(self, obj, fieldtype)
    if fieldtype in ('string', 'text', 'json') and value[:1] == "'":
        value = 'N' + value
    return value

def execute(self, a):
    """Decode the byte-string statement from UTF-8 before executing
    (Python 2 byte-string SQL)."""
    return self.log_execute(a.decode('utf8'))
3604
class VerticaAdapter(MSSQLAdapter):
    """Adapter for HP Vertica (pyodbc); SQL dialect is closer to
    Postgres than to MSSQL despite the inheritance."""
    drivers = ('pyodbc',)
    T_SEP = ' '

    types = {
        'boolean': 'BOOLEAN',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BYTEA',
        'json': 'VARCHAR(%(length)s)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'IDENTITY',
        'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BYTEA',
        'list:string': 'BYTEA',
        'list:reference': 'BYTEA',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
    }
def EXTRACT(self, first, what):
    """Vertica date-part extraction."""
    return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))

def _truncate(self, table, mode=''):
    """TRUNCATE statement (no RESTART IDENTITY variants)."""
    tablename = table._tablename
    return ['TRUNCATE %s %s;' % (tablename, mode or '')]

def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
    """Vertica supports real LIMIT/OFFSET."""
    if limitby:
        (lmin, lmax) = limitby
        sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
    return 'SELECT %s %s FROM %s%s%s;' % \
        (sql_s, sql_f, sql_t, sql_w, sql_o)

def lastrowid(self, table):
    """Most recent identity value in this session."""
    self.execute('SELECT LAST_INSERT_ID();')
    return long(self.cursor.fetchone()[0])

def execute(self, a):
    """No statement rewriting needed for Vertica."""
    return self.log_execute(a)
3654
class SybaseAdapter(MSSQLAdapter):
    """Adapter for Sybase via the Sybase driver; shares most SQL
    generation with MSSQLAdapter but builds its own DSN."""
    drivers = ('Sybase',)

    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
    }

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse a sybase:// URI (bare DSN or user:password@host/db).

        BUGFIX: ``driver_args.update(...)`` previously wrote into the
        mutable default argument, leaking credentials into every later
        instantiation (and into any dict passed by the caller). The
        dict is now copied before modification; the signature is
        unchanged.
        """
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect:
            self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # copy so neither the default dict nor the caller's is mutated
        driver_args = dict(driver_args)
        ruri = uri.split('://', 1)[1]
        if '@' not in ruri:
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            # NOTE(review): matched against the full uri (scheme
            # included), not ruri, as in the original — verify intent.
            m = self.REGEX_URI.match(uri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db)

            driver_args.update(user=credential_decoder(user),
                               password=credential_decoder(password))

        def connector(dsn=dsn, driver_args=driver_args):
            return self.driver.connect(dsn, **driver_args)

        self.connector = connector
        if do_connect:
            self.reconnect()
3745
class FireBirdAdapter(BaseAdapter):
    """
    Adapter for Firebird databases.

    Auto-increment ids are emulated with a generator (sequence) plus a
    BEFORE INSERT trigger, created by create_sequence_and_triggers().
    """

    drivers = ('kinterbasdb', 'firebirdsql', 'fdb', 'pyodbc')

    # Firebird cannot reliably ALTER TABLE inside the current transaction.
    commit_on_alter_table = False
    support_distributed_transaction = True

    # Map of web2py abstract field types to Firebird column DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self, tablename):
        # Name of the generator backing the table's auto-increment id.
        return ('genid_' + self.QUOTE_TEMPLATE) % tablename

    def trigger_name(self, tablename):
        # Name of the BEFORE INSERT trigger that fills in new ids.
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def EPOCH(self, first):
        # Seconds since the Unix epoch.
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def NOT_NULL(self, default, field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)

    def SUBSTRING(self, field, parameters):
        return 'SUBSTRING(%s from %s for %s)' % (
            self.expand(field), parameters[0], parameters[1])

    def LENGTH(self, first):
        return "CHAR_LENGTH(%s)" % self.expand(first)

    def CONTAINS(self, first, second, case_sensitive=False):
        # list: fields are stored as |item|item|...; wrap the needle in
        # bars (escaping literal bars) so only whole items match.
        if first.type.startswith('list:'):
            second = Expression(None, self.CONCAT('|', Expression(
                None, self.REPLACE(second, ('|', '||'))), '|'))
        return '(%s CONTAINING %s)' % (self.expand(first),
                                       self.expand(second, 'string'))

    def _drop(self, table, mode):
        # The table's generator must be dropped along with the table.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table.sqlsafe, mode),
                'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird paginates with FIRST <count> SKIP <offset>.
        if limitby:
            (lmin, lmax) = limitby
            sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        # No TRUNCATE in Firebird: delete all rows and reset the generator.
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args=None,
                 adapter_args=None, do_connect=True, after_connection=None):
        """
        Connect using a ``firebird://user:password@host:port/db`` uri,
        optionally followed by ``?set_encoding=<charset>`` (default UTF8).
        ``credential_decoder`` is applied exactly once to the credentials.
        """
        # Use None defaults to avoid sharing a mutable dict across instances.
        if driver_args is None:
            driver_args = {}
        if adapter_args is None:
            adapter_args = {}
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        charset = m.group('charset') or 'UTF8'
        # user/password were already decoded above; the original code ran
        # them through credential_decoder a second time here.
        driver_args.update(dsn='%s/%s:%s' % (host, port, db),
                           user=user,
                           password=password,
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # Emulate auto-increment: create the table, its generator, and a
        # trigger that assigns gen_id() when no id is supplied.
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self, table):
        # gen_id(seq, 0) reads the generator's current value without bumping it.
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return long(self.cursor.fetchone()[0])
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """
    Firebird Embedded: same SQL dialect as FireBirdAdapter but the uri
    carries a filesystem path instead of host/port/db.
    """

    drivers = ('kinterbasdb', 'firebirdsql', 'fdb', 'pyodbc')

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args=None,
                 adapter_args=None, do_connect=True, after_connection=None):
        """
        Connect using ``firebird_embedded://user:password@path/to/db.fdb``,
        optionally followed by ``?set_encoding=<charset>`` (default UTF8).
        ``credential_decoder`` is applied exactly once to the credentials.
        """
        # Use None defaults to avoid sharing a mutable dict across instances.
        if driver_args is None:
            driver_args = {}
        if adapter_args is None:
            adapter_args = {}
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        host = ''  # embedded engine: no server host
        # user/password were already decoded above; the original code ran
        # them through credential_decoder a second time here.
        driver_args.update(host=host,
                           database=pathdb,
                           user=user,
                           password=password,
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3921
class InformixAdapter(BaseAdapter):
    """Adapter for IBM Informix (9.0+; see InformixSEAdapter for SE)."""

    drivers = ('informixdb',)

    # Map of web2py abstract field types to Informix column DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self, default, field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            # SKIP needs Informix 10.0+, FIRST needs 9.0+; gate on the
            # server version reported by the live connection.
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Render date/datetime values via to_date(); None means 'no override'."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T', ' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10] + ' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args=None,
                 adapter_args=None, do_connect=True, after_connection=None):
        """
        Connect using ``informix://user:password@host/db``.
        ``credential_decoder`` is applied exactly once to the credentials.
        """
        # Use None defaults to avoid sharing a mutable dict across instances.
        if driver_args is None:
            driver_args = {}
        if adapter_args is None:
            adapter_args = {}
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        # user/password were already decoded above; the original code ran
        # them through credential_decoder a second time here.
        dsn = '%s@%s' % (db, host)
        driver_args.update(user=user, password=password, autocommit=True)

        def connector(dsn=dsn, driver_args=driver_args):
            return self.driver.connect(dsn, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self, command):
        # informixdb rejects a trailing semicolon.
        if command[-1:] == ';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self, table):
        # sqlerrd[1] holds the SERIAL value of the last insert.
        return self.cursor.sqlerrd[1]
4035
class InformixSEAdapter(InformixAdapter):
    """ work in progress """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Informix SE has no SKIP/FIRST support: emit a plain SELECT and
        # let rowslice() paginate on the client side.
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        # Client-side emulation of limitby.
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
4047
class DB2Adapter(BaseAdapter):
    """Adapter for IBM DB2 via pyodbc."""

    drivers = ('pyodbc',)

    # Map of web2py abstract field types to DB2 column DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # DB2 cannot express an offset here; cap the row count only.
        if limitby:
            sql_o += ' FETCH FIRST %i ROWS ONLY' % limitby[1]
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """SQL-literal overrides for blob/datetime values; None = no override."""
        if fieldtype == 'blob':
            encoded = base64.b64encode(str(obj))
            return "BLOB('%s')" % encoded
        if fieldtype == 'datetime':
            # DB2 timestamp literal uses '-' and '.' separators.
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T', '-').replace(':', '.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10] + '-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect via pyodbc; everything after ``db2://`` is the raw
        ODBC connection string."""
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        cnxn_string = uri.split('://', 1)[1]

        def connector(cnxn=cnxn_string, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self, command):
        # pyodbc/DB2 rejects a trailing semicolon.
        if command[-1:] == ';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self, table):
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return long(self.cursor.fetchone()[0])

    def rowslice(self, rows, minimum=0, maximum=None):
        # Client-side emulation of the offset part of limitby.
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
4133
class TeradataAdapter(BaseAdapter):
    """Adapter for Teradata via pyodbc."""

    drivers = ('pyodbc',)

    # Map of web2py abstract field types to Teradata column DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'VARCHAR(2000)',
        'json': 'VARCHAR(4000)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'VARCHAR(4000)',
        'list:string': 'VARCHAR(4000)',
        'list:reference': 'VARCHAR(4000)',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect via pyodbc; everything after ``teradata://`` is the raw
        ODBC connection string."""
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        cnxn_string = uri.split('://', 1)[1]

        def connector(cnxn=cnxn_string, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def close(self, action='commit', really=True):
        # Teradata does not implicitly close off the cursor,
        # leading to SQL_ACTIVE_STATEMENTS limit errors.
        self.cursor.close()
        ConnectionPool.close(self, action, really)

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Like MSSQL, Teradata cannot express a range: TOP caps the count
        # but the offset is ignored.
        if limitby:
            sql_s += ' TOP %i' % limitby[1]
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        # Teradata's fast full-table delete.
        return ['DELETE FROM %s ALL;' % (table._tablename)]
INGRES_SEQNAME = 'ii***lineitemsequence'  # NOTE invalid database object name
                                          # (ANSI-SQL wants this form of name
                                          # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """Adapter for Ingres via pyodbc."""

    drivers = ('pyodbc',)

    # Map of web2py abstract field types to Ingres column DDL fragments.
    # The placeholder INGRES_SEQNAME is swapped for a per-table sequence
    # in create_sequence_and_triggers().
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4',  # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',  ## FIXME TODO
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            count = lmax - lmin
            if count:
                sql_s += ' FIRST %d ' % (count, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect via pyodbc; accepts either a full ODBC connection string
        (anything containing '='), or a bare local database name with OS
        authentication."""
        self.db = db
        self.dbengine = "ingres"
        # NOTE(review): refers to a module-level `pyodbc` name that is not
        # visible here and is superseded by find_driver() below — confirm.
        self._driver = pyodbc
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Simple URI processing: drop the scheme, whitespace, leading '/'s.
        connstr = uri.split(':', 1)[1].lstrip().lstrip('/')
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it.
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth.
            ruri = 'Driver={%s};Server=%s;Database=%s' % (
                'Ingres', '(local)', connstr)

        def connector(cnxn=ruri, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # Post-create-table auto-increment setup, plus modify-to-btree for
        # performance.  Older Ingres releases could use rule/trigger like
        # Oracle above.
        if hasattr(table, '_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname = '%s_iisq' % table._tablename
            query = query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self, table):
        # Read back the sequence created in create_sequence_and_triggers().
        tmp_seqname = '%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return long(self.cursor.fetchone()[0])  # don't really need int type cast here...
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter using Unicode column types (NVARCHAR/NCLOB)."""

    drivers = ('pyodbc',)

    # Same mapping as IngresAdapter but with national-character types.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4',  # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',  ## FIXME TODO
        }
4345
class SAPDBAdapter(BaseAdapter):
    """Adapter for SAP DB / MaxDB (experimental)."""

    drivers = ('sapdb',)

    support_distributed_transaction = False

    # Map of web2py abstract field types to SAP DB column DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self, table):
        # Sequence backing the table's auto-increment id column.
        return (self.QUOTE_TEMPLATE + '_id_Seq') % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Pagination via a ROWNO subquery wrapper.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # The following lines should only be executed if
        # table._sequence_name does not exist.
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
            % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect using ``sapdb://user:password@host/db``."""
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')

        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self, table):
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return long(self.cursor.fetchone()[0])
4436
class CubridAdapter(MySQLAdapter):
    """Adapter for CUBRID (experimental); SQL dialect inherited from MySQL."""

    drivers = ('cubriddb',)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args=None,
                 adapter_args=None, do_connect=True, after_connection=None):
        """
        Connect using ``cubrid://user:password@host:port/db`` (default port
        30000).  ``credential_decoder`` is applied exactly once to the
        credentials.
        """
        # Use None defaults to avoid sharing a mutable dict across instances.
        if driver_args is None:
            driver_args = {}
        if adapter_args is None:
            adapter_args = {}
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')
        # NOTE(review): charset is parsed but never passed to the driver;
        # wiring it would require knowledge of cubriddb's connect API.
        charset = m.group('charset') or 'utf8'
        # The original re-decoded user/password here into `user` and a dead
        # `passwd` local; both were already decoded above and the connector
        # bound the once-decoded `password` anyway.

        def connector(host=host, port=port, db=db,
                      user=user, passwd=password, driver_args=driver_args):
            return self.driver.connect(host, port, db, user, passwd, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Match MySQLAdapter's session setup.
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4484
######## GAE MySQL ##########

class DatabaseStoredFile:
    """
    File-like object that stores web2py's metadata (.table) files in a
    ``web2py_filesystem`` database table instead of on disk.  Supports a
    subset of the file protocol: read, readline, write, close.
    """

    # Class-level flag: True once the web2py_filesystem table has been
    # ensured to exist (shared across all instances/connections).
    web2py_filesystem = False

    def escape(self, obj):
        # Delegate SQL escaping to the owning DAL's adapter.
        return self.db._adapter.escape(obj)

    def __init__(self, db, filename, mode):
        """
        Open `filename` with `mode` ('r', 'w', 'rw' or 'a') backed by `db`.
        On first use, creates the web2py_filesystem table.  For read modes,
        loads content from the table, falling back to an on-disk file of
        the same name; raises RuntimeError if neither exists.
        """
        if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
            raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            # Ensure the backing table exists (engine-specific DDL).
            if db._adapter.dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif db._adapter.dbengine in ('postgres', 'sqlite'):
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0  # current read offset into self.data
        self.data = ''
        if mode in ('r','rw','a'):
            # NOTE(review): filename is interpolated into SQL unescaped —
            # safe only because callers pass internal .table paths.
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif exists(filename):
                # Fall back to an existing on-disk copy.
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        # Return up to `bytes` characters from the current offset.
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        # Return the next line including its trailing newline, or the
        # remainder of the buffer if no newline is left.
        i = self.data.find('\n', self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self, data):
        # Appends only; content is persisted on close_connection().
        self.data += data

    def close_connection(self):
        """Persist the buffer to the database (delete + insert) and commit;
        idempotent because self.db is cleared afterwards."""
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None

    def close(self):
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        """True if `filename` exists on disk or in web2py_filesystem."""
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        try:
            if db.executesql(query):
                return True
        except Exception, e:
            # Missing table or similar: treat as "does not exist" but log.
            if not (db._adapter.isOperationalError(e) or
                    db._adapter.isProgrammingError(e)):
                raise
            # no web2py_filesystem found?
            tb = traceback.format_exc()
            LOGGER.error("Could not retrieve %s\n%s" % (filename, tb))
        return False
4570
class UseDatabaseStoredFile:
    """
    Mixin that redirects an adapter's metadata-file operations to
    DatabaseStoredFile, i.e. stores .table files in the database.
    """

    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        # `lock` is accepted for interface compatibility but not used:
        # the database provides the consistency guarantees.
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj):
        fileobj.close_connection()

    def file_delete(self, filename):
        self.db.executesql(
            "DELETE FROM web2py_filesystem WHERE path='%s'" % filename)
        self.db.commit()
4587
class GoogleSQLAdapter(UseDatabaseStoredFile, MySQLAdapter):
    """
    Adapter for Google Cloud SQL on App Engine.  Speaks the MySQL dialect
    and stores .table metadata files in the database (no writable disk).
    """

    uploads_in_blob = True

    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect using ``google:sql://instance/database``."""
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        if do_connect: self.find_driver(adapter_args, uri)
        # On GAE the working folder lives under $HOME/applications/...
        self.folder = folder or pjoin('$HOME', THREAD_LOCAL.folder.split(
            os.sep + 'applications' + os.sep, 1)[1])
        match = self.REGEX_URI.match(uri.split("://")[1])
        if not match:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(match.group('instance'))
        self.dbstring = db = credential_decoder(match.group('db'))
        driver_args['instance'] = instance
        if not 'charset' in driver_args:
            driver_args['charset'] = 'utf8'
        # createdb (default True) means after_connection() will create and
        # select the database; otherwise the driver connects to it directly.
        self.createdb = createdb = adapter_args.get('createdb', True)
        if not createdb:
            driver_args['database'] = db

        def connector(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # rdbms expects unicode commands.
        return self.log_execute(command.decode('utf8'), *a, **b)

    def find_driver(self, adapter_args, uri=None):
        # No importable DB-API driver on GAE; connector() uses rdbms.
        self.adapter_args = adapter_args
        self.driver = "google"
4638
class NoSQLAdapter(BaseAdapter):
    """Common base for non-relational backends (GAE datastore, CouchDB,
    MongoDB, IMAP...).  Disables SQL-only features and provides shared
    value coercion helpers.
    """
    # NoSQL backends cannot lock rows
    can_select_for_update = False
    # identifiers are not quoted
    QUOTE_TEMPLATE = '%s'

    @staticmethod
    def to_unicode(obj):
        """Coerce *obj* to a unicode string (Python 2 semantics):
        str is decoded as utf8, anything else goes through unicode()."""
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj
4650
def id_query(self, table):
    """Build the catch-all query matching every record of *table*."""
    everything = table._id > 0
    return everything
4653
def represent(self, obj, fieldtype):
    """Coerce a python value *obj* to the storage form for *fieldtype*.

    Handles callables, SQLCustomType encoders, list: types (wrapping
    scalars into lists), and per-type conversion (int/float/bool/
    date/time/datetime/json/strings).  Returns None for '' on
    non-string-like fields.  Branch order is significant.
    """
    field_is_type = fieldtype.startswith
    if isinstance(obj, CALLABLETYPES):
        obj = obj()
    if isinstance(fieldtype, SQLCustomType):
        return fieldtype.encoder(obj)
    if isinstance(obj, (Expression, Field)):
        raise SyntaxError("non supported on GAE")
    if self.dbengine == 'google:datastore':
        # a raw gae.Property as fieldtype means the value is already native
        if isinstance(fieldtype, gae.Property):
            return obj
    is_string = isinstance(fieldtype,str)
    is_list = is_string and field_is_type('list:')
    if is_list:
        # list: fields always store a list; wrap scalars, default to []
        if not obj:
            obj = []
        if not isinstance(obj, (list, tuple)):
            obj = [obj]
    if obj == '' and not \
            (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
        # empty string only meaningful for string/text/password/upload
        return None
    if not obj is None:
        if isinstance(obj, list) and not is_list:
            # scalar field given a list: represent each element
            obj = [self.represent(o, fieldtype) for o in obj]
        elif fieldtype in ('integer','bigint','id'):
            obj = long(obj)
        elif fieldtype == 'double':
            obj = float(obj)
        elif is_string and field_is_type('reference'):
            if isinstance(obj, (Row, Reference)):
                obj = obj['id']
            obj = long(obj)
        elif fieldtype == 'boolean':
            # anything not starting with '0' or 'F'/'f' is True
            if obj and not str(obj)[0].upper() in '0F':
                obj = True
            else:
                obj = False
        elif fieldtype == 'date':
            if not isinstance(obj, datetime.date):
                (y, m, d) = map(int,str(obj).strip().split('-'))
                obj = datetime.date(y, m, d)
            elif isinstance(obj,datetime.datetime):
                # datetime is a date subclass: truncate to the date part
                (y, m, d) = (obj.year, obj.month, obj.day)
                obj = datetime.date(y, m, d)
        elif fieldtype == 'time':
            if not isinstance(obj, datetime.time):
                time_items = map(int,str(obj).strip().split(':')[:3])
                if len(time_items) == 3:
                    (h, mi, s) = time_items
                else:
                    (h, mi, s) = time_items + [0]
                obj = datetime.time(h, mi, s)
        elif fieldtype == 'datetime':
            if not isinstance(obj, datetime.datetime):
                (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                while len(time_items)<3:
                    time_items.append(0)
                (h, mi, s) = time_items
                obj = datetime.datetime(y, m, d, h, mi, s)
        elif fieldtype == 'blob':
            # blobs pass through untouched
            pass
        elif fieldtype == 'json':
            if isinstance(obj, basestring):
                obj = self.to_unicode(obj)
                if have_serializers:
                    obj = serializers.loads_json(obj)
                elif simplejson:
                    obj = simplejson.loads(obj)
                else:
                    raise RuntimeError("missing simplejson")
        elif is_string and field_is_type('list:string'):
            return map(self.to_unicode,obj)
        elif is_list:
            return map(int,obj)
        else:
            obj = self.to_unicode(obj)
    return obj
4732
4733 - def _insert(self,table,fields):
4734 return 'insert %s in %s' % (fields, table)
4735
4736 - def _count(self,query,distinct=None):
4737 return 'count %s' % repr(query)
4738
4739 - def _select(self,query,fields,attributes):
4740 return 'select %s where %s' % (repr(fields), repr(query))
4741
4742 - def _delete(self,tablename, query):
4743 return 'delete %s where %s' % (repr(tablename),repr(query))
4744
4745 - def _update(self,tablename,query,fields):
4746 return 'update %s (%s) where %s' % (repr(tablename), 4747 repr(fields),repr(query))
4748
def commit(self):
    """
    No-op: most NoSQL backends have no transaction support.
    """
    pass
4754
def rollback(self):
    """
    No-op: most NoSQL backends have no transaction support.
    """
    pass
4760
def close_connection(self):
    """
    No-op: NoSQL adapters here hold no poolable connection to close.
    """
    pass
# these functions should never be called!
# They override SQL-building hooks inherited from BaseAdapter; a NoSQL
# backend that needs one of these operations must override it itself
# (e.g. GoogleDatastoreAdapter overrides AND/EQ/...).  Calling any of
# them on a plain NoSQLAdapter raises SyntaxError.
def OR(self,first,second): raise SyntaxError("Not supported")
def AND(self,first,second): raise SyntaxError("Not supported")
def AS(self,first,second): raise SyntaxError("Not supported")
def ON(self,first,second): raise SyntaxError("Not supported")
def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
def ADD(self,first,second): raise SyntaxError("Not supported")
def SUB(self,first,second): raise SyntaxError("Not supported")
def MUL(self,first,second): raise SyntaxError("Not supported")
def DIV(self,first,second): raise SyntaxError("Not supported")
def LOWER(self,first): raise SyntaxError("Not supported")
def UPPER(self,first): raise SyntaxError("Not supported")
def EXTRACT(self,first,what): raise SyntaxError("Not supported")
def LENGTH(self, first): raise SyntaxError("Not supported")
def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
def LEFT_JOIN(self): raise SyntaxError("Not supported")
def RANDOM(self): raise SyntaxError("Not supported")
def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
def ILIKE(self,first,second): raise SyntaxError("Not supported")
def drop(self,table,mode): raise SyntaxError("Not supported")
def alias(self,table,alias): raise SyntaxError("Not supported")
def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
def prepare(self,key): raise SyntaxError("Not supported")
def commit_prepared(self,key): raise SyntaxError("Not supported")
def rollback_prepared(self,key): raise SyntaxError("Not supported")
def concat_add(self,table): raise SyntaxError("Not supported")
def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
# deliberately a silent no-op: table creation needs no sequences/triggers here
def create_sequence_and_triggers(self, query, table, **args): pass
def log_execute(self,*a,**b): raise SyntaxError("Not supported")
def execute(self,*a,**b): raise SyntaxError("Not supported")
def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
def lastrowid(self,table): raise SyntaxError("Not supported")
def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4804
class GAEF(object):
    """One Google App Engine datastore filter: (property, operator, value),
    plus a python predicate used when filtering must happen client-side."""

    def __init__(self, name, op, value, apply):
        # the datastore addresses the primary key as '__key__', not 'id'
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
4814
class GoogleDatastoreAdapter(NoSQLAdapter):
    """
    Adapter for the Google App Engine datastore (db or ndb API).

    NDB:

    You can enable NDB by using adapter_args:

    db = DAL('google:datastore', adapter_args={'ndb_settings':ndb_settings, 'use_ndb':True})

    ndb_settings is optional and can be used for per model caching settings.
    It must be a dict in this form:
    ndb_settings = {<table_name>:{<variable_name>:<variable_value>}}
    See: https://developers.google.com/appengine/docs/python/ndb/cache
    """

    uploads_in_blob = True
    # NOTE(review): this dict is a class attribute populated per-instance by
    # __init__.update(); all instances share it, so mixing a db and an ndb
    # adapter in one process would cross-contaminate the type map.
    types = {}
    # reconnect is not required for Datastore dbs
    reconnect = lambda *args, **kwargs: None

    # no filesystem on GAE: file hooks are inert
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # extracts an optional datastore namespace from the URI tail
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Populate the DAL-type -> datastore-Property map (ndb or classic db
    flavour), set an optional namespace from the URI, and record ndb
    cache settings.  pool_size is forced to 0: the datastore has no
    connection pool.
    """
    self.use_ndb = ('use_ndb' in adapter_args) and adapter_args['use_ndb']
    if self.use_ndb is True:
        self.types.update({
            'boolean': ndb.BooleanProperty,
            'string': (lambda **kwargs: ndb.StringProperty(**kwargs)),
            'text': ndb.TextProperty,
            'json': ndb.TextProperty,
            'password': ndb.StringProperty,
            'blob': ndb.BlobProperty,
            'upload': ndb.StringProperty,
            'integer': ndb.IntegerProperty,
            'bigint': ndb.IntegerProperty,
            'float': ndb.FloatProperty,
            'double': ndb.FloatProperty,
            'decimal': NDBDecimalProperty,
            'date': ndb.DateProperty,
            'time': ndb.TimeProperty,
            'datetime': ndb.DateTimeProperty,
            'id': None,
            'reference': ndb.IntegerProperty,
            'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)),
            'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
            'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
        })
    else:
        self.types.update({
            'boolean': gae.BooleanProperty,
            'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
            'text': gae.TextProperty,
            'json': gae.TextProperty,
            'password': gae.StringProperty,
            'blob': gae.BlobProperty,
            'upload': gae.StringProperty,
            'integer': gae.IntegerProperty,
            'bigint': gae.IntegerProperty,
            'float': gae.FloatProperty,
            'double': gae.FloatProperty,
            'decimal': GAEDecimalProperty,
            'date': gae.DateProperty,
            'time': gae.TimeProperty,
            'datetime': gae.DateTimeProperty,
            'id': None,
            'reference': gae.IntegerProperty,
            'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
            'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
            'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
        })
    self.db = db
    self.uri = uri
    self.dbengine = 'google:datastore'
    self.folder = folder
    db['_lastsql'] = ''
    self.db_codec = 'UTF-8'
    self._after_connection = after_connection
    self.pool_size = 0
    match = self.REGEX_NAMESPACE.match(uri)
    if match:
        namespace_manager.set_namespace(match.group('namespace'))
    # key-construction callable appropriate to the chosen API
    self.keyfunc = (self.use_ndb and ndb.Key) or Key.from_path

    self.ndb_settings = None
    if 'ndb_settings' in adapter_args:
        self.ndb_settings = adapter_args['ndb_settings']
4906
def parse_id(self, value, field_type):
    """Datastore ids are already native integers; pass them through."""
    return value
4909
def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
    """Build a datastore Model class for *table* and attach it as
    table._tableobj.  polymodel may be None (plain Model), True
    (PolyModel root) or a Table (subclass of that table's model).
    No physical migration happens: the datastore is schemaless.
    """
    myfields = {}
    for field in table:
        # fields inherited from the polymodel parent are not redeclared
        if isinstance(polymodel,Table) and field.name in polymodel.fields():
            continue
        attr = {}
        if isinstance(field.custom_qualifier, dict):
            #this is custom properties to add to the GAE field declartion
            attr = field.custom_qualifier
        field_type = field.type
        if isinstance(field_type, SQLCustomType):
            ftype = self.types[field_type.native or field_type.type](**attr)
        elif isinstance(field_type, ((self.use_ndb and ndb.Property) or gae.Property)):
            # a raw Property instance is used verbatim
            ftype = field_type
        elif field_type.startswith('id'):
            # the datastore key itself plays the role of the id field
            continue
        elif field_type.startswith('decimal'):
            precision, scale = field_type[7:].strip('()').split(',')
            precision = int(precision)
            scale = int(scale)
            dec_cls = (self.use_ndb and NDBDecimalProperty) or GAEDecimalProperty
            ftype = dec_cls(precision, scale, **attr)
        elif field_type.startswith('reference'):
            if field.notnull:
                attr = dict(required=True)
            ftype = self.types[field_type[:9]](**attr)
        elif field_type.startswith('list:reference'):
            if field.notnull:
                attr['required'] = True
            ftype = self.types[field_type[:14]](**attr)
        elif field_type.startswith('list:'):
            ftype = self.types[field_type](**attr)
        elif not field_type in self.types\
                or not self.types[field_type]:
            raise SyntaxError('Field: unknown field type: %s' % field_type)
        else:
            ftype = self.types[field_type](**attr)
        myfields[field.name] = ftype
    if not polymodel:
        model_cls = (self.use_ndb and ndb.Model) or gae.Model
        table._tableobj = classobj(table._tablename, (model_cls, ), myfields)
        if self.use_ndb:
            # Set NDB caching variables
            if self.ndb_settings and (table._tablename in self.ndb_settings):
                # NOTE(review): per the class docstring ndb_settings maps
                # table_name -> {var: value}; iterating the OUTER dict here
                # setattr's table names onto the model — looks like it should
                # iterate self.ndb_settings[table._tablename]. TODO confirm.
                for k, v in self.ndb_settings.iteritems():
                    setattr(table._tableobj, k, v)
    elif polymodel==True:
        pm_cls = (self.use_ndb and NDBPolyModel) or PolyModel
        table._tableobj = classobj(table._tablename, (pm_cls, ), myfields)
    elif isinstance(polymodel,Table):
        table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
    else:
        raise SyntaxError("polymodel must be None, True, a table or a tablename")
    return None
4964
def expand(self,expression,field_type=None):
    """Recursively evaluate *expression* for the datastore backend.

    Fields expand to their property name; Expressions/Queries dispatch
    to their operator method (returning GAEF filter lists or strings);
    literals are represented per *field_type*.
    """
    if isinstance(expression,Field):
        # unindexed property types cannot appear in queries
        if expression.type in ('text', 'blob', 'json'):
            raise SyntaxError('AppEngine does not index by: %s' % expression.type)
        return expression.name
    elif isinstance(expression, (Expression, Query)):
        if not expression.second is None:
            return expression.op(expression.first, expression.second)
        elif not expression.first is None:
            return expression.op(expression.first)
        else:
            return expression.op()
    elif field_type:
        return self.represent(expression,field_type)
    elif isinstance(expression,(list,tuple)):
        return ','.join([self.represent(item,field_type) for item in expression])
    else:
        return str(expression)
### TODO from gql.py Expression
def AND(self,first,second):
    """Conjunction: concatenate the two GAEF filter lists.

    __key__ filters are moved to the front of the combined list
    (select_raw special-cases key filters first).
    """
    a = self.expand(first)
    b = self.expand(second)
    if b[0].name=='__key__' and a[0].name!='__key__':
        return b+a
    return a+b
4991
# Comparison operators: each returns a one-element list of GAEF filters.
# For 'id' fields the raw value is converted to a datastore Key so the
# comparison runs against __key__.

def EQ(self,first,second=None):
    """Equality; a ready-made Key is used as-is."""
    if isinstance(second, Key):
        return [GAEF(first.name,'=',second,lambda a,b:a==b)]
    return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]

def NE(self,first,second=None):
    """Inequality; id values become Keys (None is kept as None)."""
    if first.type != 'id':
        return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
    else:
        if not second is None:
            second = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]

def LT(self,first,second=None):
    """Less-than."""
    if first.type != 'id':
        return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
    else:
        second = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name,'<',second,lambda a,b:a<b)]

def LE(self,first,second=None):
    """Less-than-or-equal."""
    if first.type != 'id':
        return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
    else:
        second = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]

def GT(self,first,second=None):
    """Greater-than.  id>0 (the catch-all id_query) is NOT turned into a
    Key filter — select_raw recognises and skips it."""
    if first.type != 'id' or second==0 or second == '0':
        return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
    else:
        second = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name,'>',second,lambda a,b:a>b)]

def GE(self,first,second=None):
    """Greater-than-or-equal."""
    if first.type != 'id':
        return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
    else:
        second = Key.from_path(first._tablename, long(second))
        return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
5032
def INVERT(self,first):
    """Descending sort marker for *first* (orderby '-name' syntax)."""
    return '-' + first.name
5035
def COMMA(self,first,second):
    """Join two expanded expressions with ', ' (orderby lists)."""
    left = self.expand(first)
    right = self.expand(second)
    return '%s, %s' % (left, right)
5038
def BELONGS(self,first,second=None):
    """IN filter: *second* must be a list/tuple/set of candidate values;
    id values are converted to datastore Keys."""
    if not isinstance(second,(list, tuple, set)):
        raise SyntaxError("Not supported")
    if not self.use_ndb:
        # classic db API cannot take a set; ndb can
        if isinstance(second,set):
            second = list(second)
    if first.type == 'id':
        second = [Key.from_path(first._tablename, int(i)) for i in second]
    return [GAEF(first.name,'in',second,lambda a,b:a in b)]
5048
def CONTAINS(self,first,second,case_sensitive=False):
    """Membership test on a list: field; expressed as equality because the
    datastore matches list properties element-wise."""
    # silently ignoring: GAE can only do case sensitive matches!
    if not first.type.startswith('list:'):
        raise SyntaxError("Not supported")
    # represent second as the list's element type (strip 'list:' prefix)
    return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
5054
def NOT(self,first):
    """Negate a comparison Query by swapping its operator for the
    complementary one (EQ<->NE, LT<->GE, GT<->LE) and re-expanding.
    Only simple comparison queries can be negated on the datastore.
    """
    nops = { self.EQ: self.NE,
             self.NE: self.EQ,
             self.LT: self.GE,
             self.GT: self.LE,
             self.LE: self.GT,
             self.GE: self.LT}
    if not isinstance(first,Query):
        raise SyntaxError("Not suported")
    nop = nops.get(first.op,None)
    if not nop:
        raise SyntaxError("Not suported %s" % first.op.__name__)
    # NOTE: mutates the Query in place before expanding it
    first.op = nop
    return self.expand(first)
5069
def truncate(self,table,mode):
    """Emulate TRUNCATE by deleting every record of *table*; *mode* is ignored."""
    everything = self.db._adapter.id_query(table)
    self.db(everything).delete()
# operator-string -> ndb filter builder; used by filter() below to apply
# a GAEF to an ndb query (classic db queries use string filters instead)
GAE_FILTER_OPTIONS = {
    '=': lambda q, t, p, v: q.filter(getattr(t,p) == v),
    '>': lambda q, t, p, v: q.filter(getattr(t,p) > v),
    '<': lambda q, t, p, v: q.filter(getattr(t,p) < v),
    '<=': lambda q, t, p, v: q.filter(getattr(t,p) <= v),
    '>=': lambda q, t, p, v: q.filter(getattr(t,p) >= v),
    '!=': lambda q, t, p, v: q.filter(getattr(t,p) != v),
    'in': lambda q, t, p, v: q.filter(getattr(t,p).IN(v)),
}

def filter(self, query, tableobj, prop, op, value):
    """Apply one (prop, op, value) filter to an ndb *query* on *tableobj*."""
    return self.GAE_FILTER_OPTIONS[op](query, tableobj, prop, value)
5085
def select_raw(self,query,fields=None,attributes=None):
    """Translate (query, fields, attributes) into a datastore query.

    Returns (items, tablename, fieldnames) where items is either a list
    of entities (when a key-equality filter resolved directly) or a lazy
    gae/ndb query object.  Supports 'projection', 'filterfields',
    'orderby', 'limitby' and 'reusecursor' attributes; joins and groupby
    are rejected (not available on the datastore).
    """
    db = self.db
    fields = fields or []
    attributes = attributes or {}
    args_get = attributes.get
    new_fields = []
    for item in fields:
        if isinstance(item,SQLALL):
            new_fields += item._table
        else:
            new_fields.append(item)
    fields = new_fields
    if query:
        tablename = self.get_table(query)
    elif fields:
        tablename = fields[0].tablename
        query = db._adapter.id_query(fields[0].table)
    else:
        raise SyntaxError("Unable to determine a tablename")

    if query:
        if use_common_filters(query):
            query = self.common_filter(query,[tablename])

    #tableobj is a GAE/NDB Model class (or subclass)
    tableobj = db[tablename]._tableobj
    filters = self.expand(query)

    projection = None
    if len(db[tablename].fields) == len(fields):
        #getting all fields, not a projection query
        projection = None
    elif args_get('projection') == True:
        projection = []
        for f in fields:
            if f.type in ['text', 'blob', 'json']:
                raise SyntaxError(
                    "text and blob field types not allowed in projection queries")
            else:
                projection.append(f.name)
    elif args_get('filterfields') == True:
        # only parse these fields client-side; full entities are fetched
        projection = []
        for f in fields:
            projection.append(f.name)

    # real projection's can't include 'id'.
    # it will be added to the result later
    query_projection = [
        p for p in projection if \
            p != db[tablename]._id.name] if projection and \
            args_get('projection') == True \
        else None

    cursor = None
    if isinstance(args_get('reusecursor'), str):
        cursor = args_get('reusecursor')
    if self.use_ndb:
        qo = ndb.QueryOptions(projection=query_projection, cursor=cursor)
        items = tableobj.query(default_options=qo)
    else:
        items = gae.Query(tableobj, projection=query_projection,
                          cursor=cursor)

    for filter in filters:
        if args_get('projection') == True and \
           filter.name in query_projection and \
           filter.op in ['=', '<=', '>=']:
            raise SyntaxError(
                "projection fields cannot have equality filters")
        if filter.name=='__key__' and filter.op=='>' and filter.value==0:
            # the catch-all id>0 query matches everything: skip it
            continue
        elif filter.name=='__key__' and filter.op=='=':
            if filter.value==0:
                items = []
            elif isinstance(filter.value, (self.use_ndb and ndb.Key) or Key):
                # key queries return a class instance,
                # can't use projection
                # extra values will be ignored in post-processing later
                item = filter.value.get() if self.use_ndb else tableobj.get(filter.value)
                items = (item and [item]) or []
            else:
                # key queries return a class instance,
                # can't use projection
                # extra values will be ignored in post-processing later
                item = tableobj.get_by_id(filter.value)
                items = (item and [item]) or []
        elif isinstance(items,list): # i.e. there is a single record!
            # apply remaining filters client-side.
            # BUGFIX: predicate must test each candidate record 'i';
            # previously it read the stale 'item' variable from the
            # key-lookup branch above, filtering on the wrong entity.
            items = [i for i in items if filter.apply(
                getattr(i,filter.name),filter.value)]
        else:
            if filter.name=='__key__' and filter.op != 'in':
                # inequality on keys requires ordering by key first
                if self.use_ndb:
                    items.order(tableobj._key)
                else:
                    items.order('__key__')
            items = self.filter(items, tableobj, filter.name,
                                filter.op, filter.value) \
                if self.use_ndb else \
                items.filter('%s %s' % (filter.name,filter.op),
                             filter.value)

    if not isinstance(items,list):
        if args_get('left', None):
            raise SyntaxError('Set: no left join in appengine')
        if args_get('groupby', None):
            raise SyntaxError('Set: no groupby in appengine')
        orderby = args_get('orderby', False)
        if orderby:
            ### THIS REALLY NEEDS IMPROVEMENT !!!
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if isinstance(orderby,Expression):
                orderby = self.expand(orderby)
            orders = orderby.split(', ')
            for order in orders:
                if self.use_ndb:
                    #TODO There must be a better way
                    def make_order(o):
                        s = str(o)
                        desc = s[0] == '-'
                        s = (desc and s[1:]) or s
                        return (desc and -getattr(tableobj, s)) or getattr(tableobj, s)
                    _order = {'-id':-tableobj._key,'id':tableobj._key}.get(order)
                    if _order is None:
                        _order = make_order(order)
                    items = items.order(_order)
                else:
                    order={'-id':'-__key__','id':'__key__'}.get(order,order)
                    items = items.order(order)

        if args_get('limitby', None):
            (lmin, lmax) = attributes['limitby']
            (limit, offset) = (lmax - lmin, lmin)
            if self.use_ndb:
                rows, cursor, more = items.fetch_page(limit,offset=offset,keys_only=True)
            else:
                rows = items.fetch(limit,offset=offset,keys_only=True)
            # fetch keys only, then resolve them in one batch get
            rows = ndb.get_multi(rows) if self.use_ndb else gae.get(rows)
            #cursor is only useful if there was a limit and we didn't return
            # all results
            if args_get('reusecursor'):
                db['_lastcursor'] = cursor if self.use_ndb else items.cursor()
            items = rows
    return (items, tablename, projection or db[tablename].fields)
5232
def select(self,query,fields,attributes):
    """
    This is the GAE version of select. some notes to consider:
    - db['_lastsql'] is not set because there is no SQL statement string
      for a GAE query
    - 'nativeRef' is a magical fieldname used for self references on GAE
    - optional attribute 'projection' when set to True will trigger
      use of the GAE projection queries. note that there are rules for
      what is accepted imposed by GAE: each field must be indexed,
      projection queries cannot contain blob or text fields, and you
      cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
    - optional attribute 'filterfields' when set to True web2py will only
      parse the explicitly listed fields into the Rows object, even though
      all fields are returned in the query. This can be used to reduce
      memory usage in cases where true projection queries are not
      usable.
    - optional attribute 'reusecursor' allows use of cursor with queries
      that have the limitby attribute. Set the attribute to True for the
      first query, set it to the value of db['_lastcursor'] to continue
      a previous query. The user must save the cursor value between
      requests, and the filters must be identical. It is up to the user
      to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
    """

    (items, tablename, fields) = self.select_raw(query,fields,attributes)
    # self.db['_lastsql'] = self._select(query,fields,attributes)
    # for the id column (and self-references) the row value is the entity
    # itself; parse() extracts the key later
    rows = [[(t==self.db[tablename]._id.name and item) or \
             (t=='nativeRef' and item) or getattr(item, t) \
             for t in fields] for item in items]
    colnames = ['%s.%s' % (tablename, t) for t in fields]
    processor = attributes.get('processor',self.parse)
    return processor(rows,fields,colnames,False)
5265
def parse_list_integers(self, value, field_type):
    """Return the stored list of ints; under NDB a shallow copy is made so
    callers cannot mutate the entity's own list."""
    if self.use_ndb:
        return list(value)
    return value
5268
def parse_list_strings(self, value, field_type):
    """Return the stored list of strings; under NDB a shallow copy is made
    so callers cannot mutate the entity's own list."""
    if self.use_ndb:
        return list(value)
    return value
5271
def count(self,query,distinct=None,limit=None):
    """Count records matching *query*; DISTINCT is not available on the
    datastore.  len() works when select_raw resolved to a concrete list,
    otherwise the lazy query's count(limit=...) is used."""
    if distinct:
        raise RuntimeError("COUNT DISTINCT not supported")
    (items, tablename, fields) = self.select_raw(query)
    # self.db['_lastsql'] = self._count(query)
    try:
        return len(items)
    except TypeError:
        # items is a lazy gae/ndb query, not a list
        return items.count(limit=limit)
5281
def delete(self,tablename, query):
    """Delete all records matching *query*; returns the number deleted.

    This function was changed on 2010-05-04 because according to
    http://code.google.com/p/googleappengine/issues/detail?id=3119
    GAE no longer supports deleting more than 1000 records,
    so deletion proceeds in batches of 1000 keys.
    """
    # self.db['_lastsql'] = self._delete(tablename,query)
    (items, tablename, fields) = self.select_raw(query)
    # items can be one item or a query
    if not isinstance(items,list):
        #use a keys_only query to ensure that this runs as a datastore
        # small operation
        leftitems = items.fetch(1000, keys_only=True)
        counter = 0
        while len(leftitems):
            counter += len(leftitems)
            if self.use_ndb:
                ndb.delete_multi(leftitems)
            else:
                gae.delete(leftitems)
            leftitems = items.fetch(1000, keys_only=True)
    else:
        counter = len(items)
        if self.use_ndb:
            ndb.delete_multi([item.key for item in items])
        else:
            gae.delete(items)
    return counter
5310
def update(self,tablename,query,update_fields):
    """Apply (field, value) pairs to every record matching *query*,
    putting each entity back individually; returns the update count."""
    # self.db['_lastsql'] = self._update(tablename,query,update_fields)
    (items, tablename, fields) = self.select_raw(query)
    counter = 0
    for item in items:
        for field, value in update_fields:
            setattr(item, field.name, self.represent(value,field.type))
        item.put()
        counter += 1
    # NOTE(review): logs the bare count at INFO level on every update call
    LOGGER.info(str(counter))
    return counter
5322
def insert(self,table,fields):
    """Store one new entity; returns a Reference carrying the new numeric
    id with the raw datastore key attached as _gaekey."""
    dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
    # table._db['_lastsql'] = self._insert(table,fields)
    tmp = table._tableobj(**dfields)
    tmp.put()
    # ndb exposes .key as a property; classic db as a method
    key = tmp.key if self.use_ndb else tmp.key()
    rid = Reference(key.id())
    (rid._table, rid._record, rid._gaekey) = (table, None, key)
    return rid
5332
def bulk_insert(self,table,items):
    """Store many records in one batched put; returns True (ids are not
    reported back, unlike insert())."""
    parsed_items = []
    for item in items:
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
        parsed_items.append(table._tableobj(**dfields))
    if self.use_ndb:
        ndb.put_multi(parsed_items)
    else:
        gae.put(parsed_items)
    return True
5343
def uuid2int(uuidv):
    """Map a UUID string to its 128-bit integer value."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
5346
def int2uuid(n):
    """Inverse of uuid2int: render a 128-bit integer as a UUID string."""
    return str(uuid.UUID(int=n))
5349
class CouchDBAdapter(NoSQLAdapter):
    """Experimental adapter for CouchDB: one CouchDB database per table,
    queries compiled to javascript map functions."""
    drivers = ('couchdb',)

    uploads_in_blob = True
    # python-side coercion targets per DAL field type (Py2: long for ids)
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'json': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'bigint': long,
        'float': float,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
    }

    # no filesystem semantics for CouchDB: file hooks are inert
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass
5379
def expand(self,expression,field_type=None):
    """Expand an expression for CouchDB javascript; 'id' fields map to the
    document's builtin _id, everything else uses the base expansion."""
    if isinstance(expression,Field) and expression.type=='id':
        return "%s._id" % expression.tablename
    return BaseAdapter.expand(self,expression,field_type)
5385
def AND(self,first,second):
    """Javascript conjunction of the two expanded sub-expressions."""
    left = self.expand(first)
    right = self.expand(second)
    return '(%s && %s)' % (left, right)
5388
def OR(self,first,second):
    """Javascript disjunction of the two expanded sub-expressions."""
    left = self.expand(first)
    right = self.expand(second)
    return '(%s || %s)' % (left, right)
5391
def EQ(self,first,second):
    """Javascript equality test; python None maps to javascript null."""
    if second is None:
        return '(%s == null)' % self.expand(first)
    left = self.expand(first)
    right = self.expand(second, first.type)
    return '(%s == %s)' % (left, right)
5396
def NE(self,first,second):
    """Javascript inequality test; python None maps to javascript null."""
    if second is None:
        return '(%s != null)' % self.expand(first)
    left = self.expand(first)
    right = self.expand(second, first.type)
    return '(%s != %s)' % (left, right)
5401
def COMMA(self,first,second):
    """Combine two expanded expressions with javascript '+'."""
    left = self.expand(first)
    right = self.expand(second)
    return '%s + %s' % (left, right)
5404
def represent(self, obj, fieldtype):
    """Render *obj* as a javascript/JSON literal for CouchDB storage:
    ids become quoted decimal strings, temporal/boolean values are JSON
    serialized, everything else is repr'd (unicode re-encoded as utf8)."""
    value = NoSQLAdapter.represent(self, obj, fieldtype)
    if fieldtype=='id':
        return repr(str(long(value)))
    elif fieldtype in ('date','time','datetime','boolean'):
        return serializers.json(value)
    return repr(not isinstance(value,unicode) and value \
                or value and value.encode('utf8'))
5413
# NOTE(review): driver_args uses a mutable default argument (shared dict)
def __init__(self,db,uri='couchdb://127.0.0.1:5984',
             pool_size=0,folder=None,db_codec ='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Connect to a CouchDB server; the URI tail ("host:port") is turned
    into an http:// URL for couchdb.Server.  Always reconnects
    immediately (do_connect only controls driver discovery)."""
    self.db = db
    self.uri = uri
    if do_connect: self.find_driver(adapter_args)
    self.dbengine = 'couchdb'
    self.folder = folder
    db['_lastsql'] = ''
    self.db_codec = 'UTF-8'
    self._after_connection = after_connection
    self.pool_size = pool_size

    # uri[10:] strips the 'couchdb://' scheme prefix
    url='http://'+uri[10:]
    def connector(url=url,driver_args=driver_args):
        return self.driver.Server(url,**driver_args)
    self.reconnect(connector,cursor=False)
5432
def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
    """Ensure the CouchDB database backing *table* exists (one database
    per table).

    CouchDB is schemaless, so "migrating" only means creating the
    database; *fake_migrate* and *polymodel* are accepted for interface
    compatibility and ignored.  Creation errors (typically: database
    already exists) are deliberately ignored, preserving the original
    best-effort behaviour — but the former bare ``except:`` is narrowed
    to ``except Exception`` so KeyboardInterrupt/SystemExit propagate.
    """
    if migrate:
        try:
            self.connection.create(table._tablename)
        except Exception:
            # most likely couchdb's PreconditionFailed: db already exists
            pass
5439
    def insert(self,table,fields):
        """Insert a document; the record id is generated client-side
        from a uuid and stored as the couchdb '_id'.  Returns the id."""
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        # fields is a sequence of (Field, value) pairs
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id
5447
    def _select(self,query,fields,attributes):
        """Build the couchdb javascript map function for ``query``.

        Returns (fn, colnames) where fn is the temporary-view source
        and colnames the 'table.field' names in emit order.
        """
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        # flatten SQLALL entries (table.*) into the field list
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        def uid(fd):
            # map the DAL 'id' field name to couchdb's '_id'
            return fd=='id' and '_id' or fd
        def get(row,fd):
            # NOTE(review): defined but not used in this method
            return fd=='id' and long(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames
    def select(self,query,fields,attributes):
        """Run the map function from _select as a couchdb temporary
        view and parse the emitted rows into a Rows object."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
5484
    def delete(self,tablename,query):
        """Delete matching documents; returns the number deleted.

        Fast path: a plain 'id == value' query deletes a single
        document directly; otherwise ids are selected first and
        deleted one by one.
        """
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            # NOTE(review): tautological assert (tablename was just
            # assigned from the same attribute)
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)
5505
    def update(self,tablename,query,fields):
        """Update matching documents; returns the number updated.

        Fast path: a plain 'id == value' query updates one document;
        otherwise matching ids are selected and updated one by one.
        A vanished document counts as 0 updates.
        """
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)
5532
5533 - def count(self,query,distinct=None):
5534 if distinct: 5535 raise RuntimeError("COUNT DISTINCT not supported") 5536 if not isinstance(query,Query): 5537 raise SyntaxError("Not Supported") 5538 tablename = self.get_table(query) 5539 rows = self.select(query,[self.db[tablename]._id],{}) 5540 return len(rows)
5541
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]

    NOTE: the regex validation below is currently disabled, so the
    input is returned unchanged.
    """
    #if not REGEX_ALPHANUMERIC.match(text):
    #    raise SyntaxError('invalid table or field name: %s' % text)
    return text
5549
class MongoDBAdapter(NoSQLAdapter):
    # mongo stores JSON documents natively, so 'json' fields need no
    # extra string serialization
    native_json = True
    drivers = ('pymongo',)

    # keep uploaded files on the filesystem by default; can be
    # overridden via adapter_args in __init__
    uploads_in_blob = False

    # web2py field type -> python type used when parsing values
    # coming back from mongo
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    # shared message for operators that would need server-side
    # javascript to implement (see ADD/SUB/... below)
    error_messages = {"javascript_needed": "This must yet be replaced" +
                      " with javascript in order to work."}
    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to a MongoDB server.

        adapter_args of interest:
          minimumreplication -- minimum replica acknowledgements for
              insert/update (default 0)
          safe -- default write-concern: True = synchronous writes
          uploads_in_blob -- store uploads inside documents
        """
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        #this is the minimum amount of replicates that it should wait
        # for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all inserts and selects are performed asynchronous,
        # but now the default is
        # synchronous, except when overruled by either this default or
        # function parameter
        self.safe = adapter_args.get('safe',True)
        # load user setting for uploads in blob storage
        self.uploads_in_blob = adapter_args.get('uploads_in_blob', False)

        if isinstance(m,tuple):
            m = {"database" : m[1]}
        if m.get('database')==None:
            raise SyntaxError("Database is required!")

        def connector(uri=self.uri,m=m):
            # Connection() is deprecated in newer pymongo releases
            if hasattr(self.driver, "MongoClient"):
                Connection = self.driver.MongoClient
            else:
                Connection = self.driver.Connection
            return Connection(uri)[m.get('database')]

        self.reconnect(connector,cursor=False)
5631
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance

        self.object_id("<random>") -> ObjectId (not unique) instance

        Accepts an existing ObjectId, an integer, or a string holding
        a decimal integer, a 24-digit raw hex value, or an (optionally
        '0x'-prefixed) hex literal.  None/falsy becomes ObjectId 0.
        """
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                # NOTE(review): builds a py2 long literal string
                # ("0x...L") and parses it with base 0; py2-specific
                arg = int("0x%sL" % \
                    "".join([self.random.choice("0123456789abcdef") \
                    for x in range(24)]), 0)
            elif arg.isalnum():
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                            "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg

        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        # left-pad the hex value to the 24 digits an ObjectId requires
        hexvalue = hex(arg)[2:].rstrip('L').zfill(24)
        return self.ObjectId(hexvalue)
5666
5667 - def parse_reference(self, value, field_type):
5668 # here we have to check for ObjectID before base parse 5669 if isinstance(value, self.ObjectId): 5670 value = long(str(value), 16) 5671 return super(MongoDBAdapter, 5672 self).parse_reference(value, field_type)
5673
5674 - def parse_id(self, value, field_type):
5675 if isinstance(value, self.ObjectId): 5676 value = long(str(value), 16) 5677 return super(MongoDBAdapter, 5678 self).parse_id(value, field_type)
5679
    def represent(self, obj, fieldtype):
        """Convert a python value to its mongo storage representation.

        - date/time become datetimes (mongo has no separate types)
        - blobs become bson Binary
        - list:reference items and id/reference values become ObjectIds
        """
        # the base adapter does not support MongoDB ObjectId
        if isinstance(obj, self.ObjectId):
            value = obj
        else:
            value = NoSQLAdapter.represent(self, obj, fieldtype)
        # reference types must be converted to ObjectID
        if fieldtype =='date':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            t = datetime.time(0, 0, 0)
            # mongodb doesn't have a date object and so it must be a
            # datetime, string or integer
            return datetime.datetime.combine(value, t)
        elif fieldtype == 'time':
            if value == None:
                return value
            # this piece of data can be stripped of based on the fieldtype
            d = datetime.date(2000, 1, 1)
            # mongodb doesn't have a time object and so it must be a
            # datetime, string or integer
            return datetime.datetime.combine(d, value)
        elif fieldtype == "blob":
            if value== None:
                return value
            from bson import Binary
            if not isinstance(value, Binary):
                if not isinstance(value, basestring):
                    return Binary(str(value))
                return Binary(value)
            return value
        elif (isinstance(fieldtype, basestring) and
              fieldtype.startswith('list:')):
            if fieldtype.startswith('list:reference'):
                newval = []
                for v in value:
                    newval.append(self.object_id(v))
                return newval
            return value
        elif ((isinstance(fieldtype, basestring) and
               fieldtype.startswith("reference")) or
               (isinstance(fieldtype, Table)) or fieldtype=="id"):
            value = self.object_id(value)
        return value
5725
5726 - def create_table(self, table, migrate=True, fake_migrate=False, 5727 polymodel=None, isCapped=False):
5728 if isCapped: 5729 raise RuntimeError("Not implemented")
5730
    def count(self, query, distinct=None, snapshot=True):
        """Count matching documents via select(count=True).

        ``distinct`` is not supported.
        """
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        return long(self.select(query,[self.db[tablename]._id], {},
                                count=True,snapshot=snapshot)['count'])
    # Maybe it would be faster if we just implemented the pymongo
    # .count() function which is probably quicker?
    # therefore call __select() connection[table].find(query).count()
    # Since this will probably reduce the return set?
    def expand(self, expression, field_type=None):
        """Translate a DAL Query/Expression/Field/value into its
        pymongo query-document (or plain value) equivalent.

        NOTE(review): for an 'id' query this assigns
        ``expression.first.name = '_id'`` in place, permanently
        renaming the Field object -- confirm this is intended.
        NOTE(review): for a Query, ``expression.op`` is invoked in the
        first block and again in the elif below; harmless only while
        the op methods are free of side effects.
        """
        if isinstance(expression, Query):
            # any query using 'id':=
            #  set name as _id (as per pymongo/mongodb primary key)
            #  convert second arg to an objectid field
            #  (if its not already)
            #  if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                    ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                if isinstance(expression.second, (tuple, list, set)):
                    expression.second = [self.object_id(item) for
                                         item in expression.second]
                else:
                    expression.second = self.object_id(expression.second)
                result = expression.op(expression.first, expression.second)

        if isinstance(expression, Field):
            if expression.type=='id':
                result = "_id"
            else:
                result = expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            result = self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
5786
5787 - def drop(self, table, mode=''):
5788 ctable = self.connection[table._tablename] 5789 ctable.drop()
5790
5791 - def truncate(self, table, mode, safe=None):
5792 if safe == None: 5793 safe=self.safe 5794 ctable = self.connection[table._tablename] 5795 ctable.remove(None, safe=True)
5796
    def select(self, query, fields, attributes, count=False,
               snapshot=False):
        """Execute ``query`` and return parsed rows (or, with
        count=True, a {'count': n} dict).

        Supported attributes: limitby, orderby, for_update (the last
        is ignored with a warning); anything else logs a warning.
        NOTE(review): fields whose column is '_id' have their .name
        mutated to 'id' in place while building colnames.
        """
        mongofields_dict = self.SON()
        mongoqry_dict = {}
        new_fields=[]
        mongosort_list = []
        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)
        if limitby:
            limitby_skip, limitby_limit = limitby[0], int(limitby[1])
        else:
            limitby_skip = limitby_limit = 0
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            # !!!! need to add 'random'
            # '-' prefix means descending sort
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))
        # flatten SQLALL entries (table.*) into the field list
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")
        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        for field in fields:
            mongofields_dict[field.name] = 1
        ctable = self.connection[tablename]
        if count:
            return {'count' : ctable.find(
                    mongoqry_dict, mongofields_dict,
                    skip=limitby_skip, limit=limitby_limit,
                    sort=mongosort_list, snapshot=snapshot).count()}
        else:
            # pymongo cursor object
            mongo_list_dicts = ctable.find(mongoqry_dict,
                                mongofields_dict, skip=limitby_skip,
                                limit=limitby_limit, sort=mongosort_list,
                                snapshot=snapshot)
            rows = []
            # populate row in proper order
            # Here we replace ._id with .id to follow the standard naming
            colnames = []
            newnames = []
            for field in fields:
                colname = str(field)
                colnames.append(colname)
                tablename, fieldname = colname.split(".")
                if fieldname == "_id":
                    # Mongodb reserved uuid key
                    field.name = "id"
                newnames.append(".".join((tablename, field.name)))

            for record in mongo_list_dicts:
                row=[]
                for colname in colnames:
                    tablename, fieldname = colname.split(".")
                    # switch to Mongo _id uuids for retrieving
                    # record id's
                    if fieldname == "id": fieldname = "_id"
                    if fieldname in record:
                        value = record[fieldname]
                    else:
                        value = None
                    row.append(value)
                rows.append(row)
            processor = attributes.get('processor', self.parse)
            result = processor(rows, fields, newnames, False)
            return result
5885
    def insert(self, table, fields, safe=None):
        """Insert a document built from the (Field, value) pairs.

        ``safe`` selects synchronous (True) vs asynchronous write
        acknowledgement; None falls back to the adapter default.
        Returns the new id as a long: the hex value of the '_id'
        ObjectId that pymongo adds to ``values`` on insert.
        """
        values = dict()
        if safe==None:
            safe = self.safe
        ctable = self.connection[table._tablename]
        for k, v in fields:
            # 'id' is managed by mongo ('_id'); 'safe' is reserved
            if not k.name in ["id", "safe"]:
                fieldname = k.name
                fieldtype = table[k.name].type
                values[fieldname] = self.represent(v, fieldtype)

        ctable.insert(values, safe=safe)
        return long(str(values['_id']), 16)
5903
    def update(self, tablename, query, fields, safe=None):
        """Update matching documents via $set; returns the number of
        documents affected (driver count when available, otherwise a
        pre-computed match count).
        """
        if safe == None:
            safe = self.safe
        # return amount of adjusted rows or zero, but no exceptions
        # @ related not finding the result
        if not isinstance(query, Query):
            raise RuntimeError("Not implemented")
        # count matches up front as a fallback result value
        amount = self.count(query, False)
        # NOTE(review): unreachable duplicate type check -- the
        # RuntimeError above already fired for non-Query input
        if not isinstance(query, Query):
            raise SyntaxError("Not Supported")
        filter = None
        if query:
            filter = self.expand(query)
        # do not try to update id fields to avoid backend errors
        modify = {'$set': dict((k.name, self.represent(v, k.type)) for
                  k, v in fields if (not k.name in ("_id", "id")))}
        try:
            result = self.connection[tablename].update(filter,
                       modify, multi=True, safe=safe)
            if safe:
                try:
                    # if result count is available fetch it
                    return result["n"]
                except (KeyError, AttributeError, TypeError):
                    return amount
            else:
                return amount
        except Exception, e:
            # TODO Reverse update query to verify that the query succeeded
            raise RuntimeError("uncaught exception when updating rows: %s" % e)
5934
    def delete(self, tablename, query, safe=None):
        """Remove matching documents; returns the pre-computed match
        count (the driver's own result is not consulted).

        NOTE(review): self.count runs before the Query type check, so
        a non-Query argument raises SyntaxError from count() rather
        than the RuntimeError below.
        """
        if safe is None:
            safe = self.safe
        amount = 0
        amount = self.count(query, False)
        if not isinstance(query, Query):
            raise RuntimeError("query type %s is not supported" % \
                               type(query))
        filter = self.expand(query)
        self.connection[tablename].remove(filter, safe=safe)
        return amount
5946
5947 - def bulk_insert(self, table, items):
5948 return [self.insert(table,item) for item in items]
5949 5950 ## OPERATORS
5951 - def INVERT(self, first):
5952 #print "in invert first=%s" % first 5953 return '-%s' % self.expand(first)
5954 5955 # TODO This will probably not work:(
5956 - def NOT(self, first):
5957 return {'$not': self.expand(first)}
5958
5959 - def AND(self,first,second):
5960 # pymongo expects: .find({'$and': [{'x':'1'}, {'y':'2'}]}) 5961 return {'$and': [self.expand(first),self.expand(second)]}
5962
5963 - def OR(self,first,second):
5964 # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) 5965 return {'$or': [self.expand(first),self.expand(second)]}
5966
5967 - def BELONGS(self, first, second):
5968 if isinstance(second, str): 5969 return {self.expand(first) : {"$in" : [ second[:-1]]} } 5970 elif second==[] or second==() or second==set(): 5971 return {1:0} 5972 items = [self.expand(item, first.type) for item in second] 5973 return {self.expand(first) : {"$in" : items} }
5974
5975 - def EQ(self,first,second=None):
5976 result = {} 5977 result[self.expand(first)] = self.expand(second) 5978 return result
5979
5980 - def NE(self, first, second=None):
5981 result = {} 5982 result[self.expand(first)] = {'$ne': self.expand(second)} 5983 return result
5984
5985 - def LT(self,first,second=None):
5986 if second is None: 5987 raise RuntimeError("Cannot compare %s < None" % first) 5988 result = {} 5989 result[self.expand(first)] = {'$lt': self.expand(second)} 5990 return result
5991
5992 - def LE(self,first,second=None):
5993 if second is None: 5994 raise RuntimeError("Cannot compare %s <= None" % first) 5995 result = {} 5996 result[self.expand(first)] = {'$lte': self.expand(second)} 5997 return result
5998
5999 - def GT(self,first,second):
6000 result = {} 6001 result[self.expand(first)] = {'$gt': self.expand(second)} 6002 return result
6003
6004 - def GE(self,first,second=None):
6005 if second is None: 6006 raise RuntimeError("Cannot compare %s >= None" % first) 6007 result = {} 6008 result[self.expand(first)] = {'$gte': self.expand(second)} 6009 return result
6010
6011 - def ADD(self, first, second):
6012 raise NotImplementedError(self.error_messages["javascript_needed"]) 6013 return '%s + %s' % (self.expand(first), 6014 self.expand(second, first.type))
6015
6016 - def SUB(self, first, second):
6017 raise NotImplementedError(self.error_messages["javascript_needed"]) 6018 return '(%s - %s)' % (self.expand(first), 6019 self.expand(second, first.type))
6020
6021 - def MUL(self, first, second):
6022 raise NotImplementedError(self.error_messages["javascript_needed"]) 6023 return '(%s * %s)' % (self.expand(first), 6024 self.expand(second, first.type))
6025
6026 - def DIV(self, first, second):
6027 raise NotImplementedError(self.error_messages["javascript_needed"]) 6028 return '(%s / %s)' % (self.expand(first), 6029 self.expand(second, first.type))
6030
6031 - def MOD(self, first, second):
6032 raise NotImplementedError(self.error_messages["javascript_needed"]) 6033 return '(%s %% %s)' % (self.expand(first), 6034 self.expand(second, first.type))
6035
6036 - def AS(self, first, second):
6037 raise NotImplementedError(self.error_messages["javascript_needed"]) 6038 return '%s AS %s' % (self.expand(first), second)
6039 6040 # We could implement an option that simulates a full featured SQL 6041 # database. But I think the option should be set explicit or 6042 # implemented as another library.
6043 - def ON(self, first, second):
6044 raise NotImplementedError("This is not possible in NoSQL" + 6045 " but can be simulated with a wrapper.") 6046 return '%s ON %s' % (self.expand(first), self.expand(second))
6047 6048 # BLOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCITONS 6049 # WHICH ONE IS BEST? 6050
6051 - def COMMA(self, first, second):
6052 return '%s, %s' % (self.expand(first), self.expand(second))
6053
    # NOTE(review): LIKE, STARTSWITH, ENDSWITH and CONTAINS below are
    # shadowed by later definitions of the same names in this class
    # (see the regex-based versions further down), so they are dead
    # code at class-creation time; only ILIKE survives.
    def LIKE(self, first, second):
        # shadowed (dead code): redefined later in this class
        #escaping regex operators?
        return {self.expand(first): ('%s' % \
                self.expand(second, 'string').replace('%','/'))}

    def ILIKE(self, first, second):
        # case-insensitive match via $regex with the 'i' option;
        # an ObjectId value is matched directly ('%' is stripped
        # from strings rather than translated to '.*')
        val = second if isinstance(second,self.ObjectId) else {
            '$regex': second.replace('%', ''), '$options': 'i'}
        return {self.expand(first): val}

    def STARTSWITH(self, first, second):
        # shadowed (dead code): redefined later in this class
        #escaping regex operators?
        return {self.expand(first): ('/^%s/' % \
                self.expand(second, 'string'))}

    def ENDSWITH(self, first, second):
        # shadowed (dead code): redefined later in this class
        # NOTE(review): the trailing '^' looks like it was meant to
        # be the '$' end-of-string anchor
        #escaping regex operators?
        return {self.expand(first): ('/%s^/' % \
                self.expand(second, 'string'))}

    def CONTAINS(self, first, second, case_sensitive=False):
        # shadowed (dead code): redefined later in this class
        # silently ignore, only case sensitive
        # There is a technical difference, but mongodb doesn't support
        # that, but the result will be the same
        val = second if isinstance(second,self.ObjectId) else \
            {'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"}
        return {self.expand(first) : val}
6081
6082 - def LIKE(self, first, second):
6083 import re 6084 return {self.expand(first): {'$regex': \ 6085 re.escape(self.expand(second, 6086 'string')).replace('%','.*')}}
6087 6088 #TODO verify full compatibilty with official SQL Like operator
6089 - def STARTSWITH(self, first, second):
6090 #TODO Solve almost the same problem as with endswith 6091 import re 6092 return {self.expand(first): {'$regex' : '^' + 6093 re.escape(self.expand(second, 6094 'string'))}}
6095 6096 #TODO verify full compatibilty with official SQL Like operator
6097 - def ENDSWITH(self, first, second):
6098 #escaping regex operators? 6099 #TODO if searched for a name like zsa_corbitt and the function 6100 # is endswith('a') then this is also returned. 6101 # Aldo it end with a t 6102 import re 6103 return {self.expand(first): {'$regex': \ 6104 re.escape(self.expand(second, 'string')) + '$'}}
6105 6106 #TODO verify full compatibilty with official oracle contains operator
6107 - def CONTAINS(self, first, second, case_sensitive=False):
6108 # silently ignore, only case sensitive 6109 #There is a technical difference, but mongodb doesn't support 6110 # that, but the result will be the same 6111 #TODO contains operators need to be transformed to Regex 6112 return {self.expand(first) : {'$regex': \ 6113 ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
6114
class IMAPAdapter(NoSQLAdapter):
    """ IMAP server adapter

    This class is intended as an interface with
    email IMAP servers to perform simple queries in the
    web2py DAL query syntax, so email read, search and
    other related IMAP mail services (as those implemented
    by brands like Google(r), and Yahoo!(r))
    can be managed from web2py applications.

    The code uses examples by Yuji Tomita on this post:
    http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
    and is based in docs for Python imaplib, python email
    and email IETF's (i.e. RFC2060 and RFC3501)

    This adapter was tested with a small set of operations with Gmail(r). Other
    services requests could raise command syntax and response data issues.

    It creates its table and field names "statically",
    meaning that the developer should leave the table and field
    definitions to the DAL instance by calling the adapter's
    .define_tables() method. The tables are defined with the
    IMAP server mailbox list information.

    .define_tables() returns a dictionary mapping dal tablenames
    to the server mailbox names with the following structure:

    {<tablename>: str <server mailbox name>}

    Here is a list of supported fields:

    Field       Type           Description
    ################################################################
    uid         string
    answered    boolean        Flag
    created     date
    content     list:string    A list of dict text or html parts
    to          string
    cc          string
    bcc         string
    size        integer        the amount of octets of the message*
    deleted     boolean        Flag
    draft       boolean        Flag
    flagged     boolean        Flag
    sender      string
    recent      boolean        Flag
    seen        boolean        Flag
    subject     string
    mime        string         The mime header declaration
    email       string         The complete RFC822 message**
    attachments <type list>    Each non text part as dict
    encoding    string         The main detected encoding

    *At the application side it is measured as the length of the RFC822
    message string

    WARNING: As row id's are mapped to email sequence numbers,
    make sure your imap client web2py app does not delete messages
    during select or update actions, to prevent
    updating or deleting different messages.
    Sequence numbers change whenever the mailbox is updated.
    To avoid this sequence numbers issues, it is recommended the use
    of uid fields in query references (although the update and delete
    in separate actions rule still applies).

    # This is the code recommended to start imap support
    # at the app's model:

    imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
    imapdb.define_tables()

    Here is an (incomplete) list of possible imap commands:

    # Count today's unseen messages
    # smaller than 6000 octets from the
    # inbox mailbox

    q = imapdb.INBOX.seen == False
    q &= imapdb.INBOX.created == datetime.date.today()
    q &= imapdb.INBOX.size < 6000
    unread = imapdb(q).count()

    # Fetch last query messages
    rows = imapdb(q).select()

    # it is also possible to filter query select results with limitby and
    # sequences of mailbox fields

    set.select(<fields sequence>, limitby=(<int>, <int>))

    # Mark last query messages as seen
    messages = [row.uid for row in rows]
    seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)

    # Delete messages in the imap database that have mails from mr. Gumby

    deleted = 0
    for mailbox in imapdb.tables:
        deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()

    # It is possible also to mark messages for deletion instead of erasing them
    # directly with set.update(deleted=True)


    # This object gives access
    # to the adapter auto mailbox
    # mapped names (which native
    # mailbox has what table name)

    imapdb.mailboxes <dict> # tablename, server native name pairs

    # To retrieve a table native mailbox name use:
    imapdb.<table>.mailbox

    ### New features v2.4.1:

    # Declare mailboxes statically with tablename, name pairs
    # This avoids the extra server names retrieval

    imapdb.define_tables({"inbox": "INBOX"})

    # Selects without content/attachments/email columns will only
    # fetch header and flags

    imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
    """

    drivers = ('imaplib',)

    # web2py field type -> python type used for parsed values
    types = {
                'string': str,
                'text': str,
                'date': datetime.date,
                'datetime': datetime.datetime,
                'id': long,
                'boolean': bool,
                'integer': int,
                'bigint': long,
                'blob': str,
                'list:string': str,
        }

    dbengine = 'imap'

    # parses "user[:password]@host[:port]" from the connection uri
    REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$')
    def __init__(self,
                 db,
                 uri,
                 pool_size=0,
                 folder=None,
                 db_codec ='UTF-8',
                 credential_decoder=IDENTITY,
                 driver_args={},
                 adapter_args={},
                 do_connect=True,
                 after_connection=None):
        """Set up the IMAP adapter and (optionally) connect.

        Port 993 switches the connector to IMAP4_SSL.  The db's
        define_tables is replaced by this adapter's own method.
        """

        # db uri: user@example.com:password@imap.server.com:123
        # TODO: max size adapter argument for preventing large mail transfers

        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size=pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.credential_decoder = credential_decoder
        self.driver_args = driver_args
        self.adapter_args = adapter_args
        self.mailbox_size = None
        self.static_names = None
        self.charset = sys.getfilesystemencoding()
        # imap class
        self.imap4 = None
        # strip the 'imap://' scheme before parsing credentials
        uri = uri.split("://")[1]

        """ MESSAGE is an identifier for sequence number"""

        # mapping of boolean flag fields to IMAP system flags
        self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
                      'flagged': '\\Flagged', 'recent': '\\Recent',
                      'seen': '\\Seen', 'answered': '\\Answered'}
        # mapping of DAL field names to IMAP SEARCH criteria
        # (None = not searchable)
        self.search_fields = {
            'id': 'MESSAGE', 'created': 'DATE',
            'uid': 'UID', 'sender': 'FROM',
            'to': 'TO', 'cc': 'CC',
            'bcc': 'BCC', 'content': 'TEXT',
            'size': 'SIZE', 'deleted': '\\Deleted',
            'draft': '\\Draft', 'flagged': '\\Flagged',
            'recent': '\\Recent', 'seen': '\\Seen',
            'subject': 'SUBJECT', 'answered': '\\Answered',
            'mime': None, 'email': None,
            'attachments': None
            }

        db['_lastsql'] = ''

        m = self.REGEX_URI.match(uri)
        user = m.group('user')
        password = m.group('password')
        host = m.group('host')
        port = int(m.group('port'))
        over_ssl = False
        if port==993:
            # standard IMAPS port implies an SSL connection
            over_ssl = True

        driver_args.update(host=host,port=port, password=password, user=user)
        def connector(driver_args=driver_args):
            # it is assumed successful authentication always
            # TODO: support direct connection and login tests
            if over_ssl:
                self.imap4 = self.driver.IMAP4_SSL
            else:
                self.imap4 = self.driver.IMAP4
            connection = self.imap4(driver_args["host"], driver_args["port"])
            data = connection.login(driver_args["user"], driver_args["password"])

            # static mailbox list
            connection.mailbox_names = None

            # dummy cursor function
            connection.cursor = lambda : True

            return connection

        self.db.define_tables = self.define_tables
        self.connector = connector
        if do_connect: self.reconnect()
6345
    def reconnect(self, f=None, cursor=True):
        """
        IMAP4 Pool connection method

        imap connection lacks of self cursor command.
        A custom command should be provided as a replacement
        for connection pooling to prevent uncaught remote session
        closing

        NOTE(review): when a pooled connection fails the liveness
        check, a fresh connection is created but self.cursor is not
        refreshed from it -- confirm whether that is intentional.
        """
        # already connected: nothing to do
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            POOLS = ConnectionPool.POOLS
            uri = self.uri
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection if one is available
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    if self.cursor and self.check_active_connection:
                        try:
                            # check if connection is alive or close it
                            result, data = self.connection.list()
                        except:
                            # Possible connection reset error
                            # TODO: read exception class
                            self.connection = f()
                    break
                else:
                    # pool empty: create a new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
6390
def get_last_message(self, tablename):
    """Return the highest message sequence number of *tablename*'s
    mailbox (clamped to at least 1), or None when it cannot be read."""
    sequence = None
    # lazily build the tablename -> native mailbox map on first use
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    try:
        native_name = self.connection.mailbox_names[tablename]
        response = self.connection.select(native_name)
        sequence = int(response[1][0])
        # the adapter treats message sequence numbers as 1-based
        if sequence == 0:
            sequence = 1
    except (IndexError, ValueError, TypeError, KeyError):
        err = sys.exc_info()[1]
        LOGGER.debug("Error retrieving the last mailbox" +
            " sequence number. %s" % str(err))
    return sequence
def get_uid_bounds(self, tablename):
    """Return a (first, last) UID pair for the mailbox mapped to
    *tablename*, or None when the mailbox holds no messages."""
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    # selecting the mailbox (via get_last_message) refreshes its state
    self.get_last_message(tablename)
    result, data = self.connection.uid("search", None, "(ALL)")
    uids = data[0].strip().split()
    if not uids:
        return None
    return (uids[0], uids[-1])
def convert_date(self, date, add=None, imf=False):
    """Convert a date object to a string
    with d-Mon-Y style for IMAP or the inverse
    case

    add <timedelta> adds to the date object
    imf: when True, format datetimes in Internet Message Format
    Returns None for unsupported input types or unparsable strings.
    """
    # BUG FIX: the docstring used to sit AFTER the first statement, so
    # it was a dead string expression and never reached __doc__.
    if add is None:
        add = datetime.timedelta()
    # date/datetime -> string (checked first; also avoids touching the
    # py2-only `basestring` name for non-string inputs)
    if isinstance(date, (datetime.date, datetime.datetime)):
        if imf:
            date_format = "%a, %d %b %Y %H:%M:%S %z"
        else:
            date_format = "%d-%b-%Y"
        return (date + add).strftime(date_format)
    elif isinstance(date, basestring):
        # string -> datetime; tolerate an optional leading day name
        months = [None, "JAN","FEB","MAR","APR","MAY","JUN",
                  "JUL","AUG","SEP","OCT","NOV","DEC"]
        # Prevent unexpected date response format
        try:
            if "," in date:
                dayname, datestring = date.split(",")
            else:
                dayname, datestring = None, date
            date_list = datestring.strip().split()
            year = int(date_list[2])
            month = months.index(date_list[1].upper())
            day = int(date_list[0])
            hms = map(int, date_list[3].split(":"))
            return datetime.datetime(year, month, day,
                hms[0], hms[1], hms[2]) + add
        # CONSISTENCY: use the sys.exc_info() pattern the rest of the
        # adapter uses instead of the py2-only `except E, e:` syntax
        except (ValueError, AttributeError, IndexError):
            e = sys.exc_info()[1]
            LOGGER.error("Could not parse date text: %s. %s" %
                         (date, e))
            return None
    else:
        return None
6457 6458 @staticmethod
6459 - def header_represent(f, r):
6460 from email.header import decode_header 6461 text, encoding = decode_header(f)[0] 6462 if encoding: 6463 text = text.decode(encoding).encode('utf-8') 6464 return text
6465
def encode_text(self, text, charset, errors="replace"):
    """Re-encode a mail byte string as utf-8; None becomes empty."""
    if text is None:
        normalized = ""
    elif isinstance(text, str):
        if charset is None:
            # no declared charset: assume utf-8
            normalized = unicode(text, "utf-8", errors)
        else:
            normalized = unicode(text, charset, errors)
    else:
        raise Exception("Unsupported mail text type %s" % type(text))
    return normalized.encode("utf-8")
6479
def get_charset(self, message):
    """Return the charset declared by *message*'s Content-Type header,
    or None when the message declares none."""
    return message.get_content_charset()
6483
def get_mailboxes(self):
    """Query the mail server for mailbox names and populate
    connection.mailbox_names (sanitized name -> native name)."""
    if self.static_names:
        # user supplied a fixed tablename -> native-name map
        self.connection.mailbox_names = self.static_names
        return self.static_names.keys()

    raw_listing = self.connection.list()
    self.connection.mailbox_names = dict()
    found = list()
    for entry in raw_listing[1]:
        entry = entry.strip()
        # skip mailboxes the server marks as not selectable
        if "NOSELECT" in entry.upper():
            continue
        quoted = [piece for piece in entry.split("\"")
                  if len(piece.strip()) > 0]
        native = quoted[-1].strip()
        # sanitize into a valid table identifier: spaces/slashes ->
        # underscore, drop punctuation, strip leading digits/underscores
        name = re.sub('^[_0-9]*', '',
                      re.sub('[^_\w]', '',
                             re.sub('[/ ]', '_', native)))
        found.append(name)
        self.connection.mailbox_names[name] = native
    return found
6511
def get_query_mailbox(self, query):
    """Walk down a query tree through .first links and return the
    tablename of the first Field found, or None."""
    node = query
    while True:
        if not hasattr(node, "first"):
            return None
        node = node.first
        if isinstance(node, Field):
            return node.tablename
        if not isinstance(node, Query):
            # neither a Field nor a deeper Query: give up
            return None
6528
def is_flag(self, flag):
    """Return True when *flag* names a native IMAP system flag
    (\\Deleted, \\Seen, ...)."""
    return self.search_fields.get(flag, None) in self.flags.values()
6534
def define_tables(self, mailbox_names=None):
    """
    Auto create common IMAP fields

    This function creates fields definitions "statically"
    meaning that custom fields as in other adapters should
    not be supported and definitions handled on a service/mode
    basis (local syntax for Gmail(r), Ymail(r)

    mailbox_names: optional dict mapping tablename -> native mailbox
    name; when given, mailbox discovery is skipped.

    Returns a dictionary with tablename, server native mailbox name
    pairs.
    """
    if mailbox_names:
        # optional statically declared mailboxes
        self.static_names = mailbox_names
    else:
        self.static_names = None
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()

    names = self.connection.mailbox_names.keys()

    # one DAL table per mailbox, all with the same fixed schema
    for name in names:
        self.db.define_table("%s" % name,
            Field("uid", writable=False),
            Field("created", "datetime", writable=False),
            Field("content", "text", writable=False),
            Field("to", writable=False),
            Field("cc", writable=False),
            Field("bcc", writable=False),
            Field("sender", writable=False),
            Field("size", "integer", writable=False),
            Field("subject", writable=False),
            Field("mime", writable=False),
            Field("email", "text", writable=False, readable=False),
            Field("attachments", "text", writable=False, readable=False),
            Field("encoding", writable=False),
            Field("answered", "boolean"),
            Field("deleted", "boolean"),
            Field("draft", "boolean"),
            Field("flagged", "boolean"),
            Field("recent", "boolean", writable=False),
            Field("seen", "boolean")
            )

        # Set a special _mailbox attribute for storing
        # native mailbox names
        self.db[name].mailbox = \
            self.connection.mailbox_names[name]

        # decode quoted printable
        self.db[name].to.represent = self.db[name].cc.represent = \
            self.db[name].bcc.represent = self.db[name].sender.represent = \
            self.db[name].subject.represent = self.header_represent

    # Set the db instance mailbox collections
    self.db.mailboxes = self.connection.mailbox_names
    return self.db.mailboxes
6593
def create_table(self, *args, **kwargs):
    """No-op: IMAP mailboxes cannot be created through the DAL, but
    the DAL machinery requires this method to exist."""
    return None
6598
def select(self, query, fields, attributes):
    """ Search and Fetch records and return web2py rows

    query: a DAL Query translated (via str()) to IMAP search syntax.
    fields: requested Field objects (or SQLALL / empty for all).
    attributes: supports 'limitby' (slice of the result set) and
    'processor' (row parser, defaults to self.parse).
    """
    # move this statement elsewhere (upper-level)
    if use_common_filters(query):
        query = self.common_filter(query, [self.get_query_mailbox(query),])

    import email
    # get records from imap server with search + fetch
    # convert results to a dictionary
    tablename = None
    fetch_results = list()

    if isinstance(query, Query):
        tablename = self.get_table(query)
        mailbox = self.connection.mailbox_names.get(tablename, None)
        if mailbox is None:
            raise ValueError("Mailbox name not found: %s" % mailbox)
        else:
            # select with readonly
            result, selected = self.connection.select(mailbox, True)
            if result != "OK":
                raise Exception("IMAP error: %s" % selected)
            self.mailbox_size = int(selected[0])
            search_query = "(%s)" % str(query).strip()
            search_result = self.connection.uid("search", None, search_query)
            # Normal IMAP response OK is assumed (change this)
            if search_result[0] == "OK":
                # For "light" remote server responses just get the first
                # ten records (change for non-experimental implementation)
                # However, light responses are not guaranteed with this
                # approach, just fewer messages.
                limitby = attributes.get('limitby', None)
                messages_set = search_result[1][0].split()
                # descending order
                messages_set.reverse()
                if limitby is not None:
                    # TODO: orderby, asc/desc, limitby from complete message set
                    messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                # keep the requests small for header/flags
                if any([(field.name in ["content", "size",
                                        "attachments", "email"]) for
                        field in fields]):
                    imap_fields = "(RFC822 FLAGS)"
                else:
                    imap_fields = "(RFC822.HEADER FLAGS)"

                if len(messages_set) > 0:
                    # create fetch results object list
                    # fetch each remote message and store it in memmory
                    # (change to multi-fetch command syntax for faster
                    # transactions)
                    for uid in messages_set:
                        # fetch the RFC822 message body
                        typ, data = self.connection.uid("fetch", uid, imap_fields)
                        if typ == "OK":
                            fr = {"message": int(data[0][0].split()[0]),
                                  "uid": long(uid),
                                  "email": email.message_from_string(data[0][1]),
                                  "raw_message": data[0][1]}
                            fr["multipart"] = fr["email"].is_multipart()
                            # fetch flags for the message
                            fr["flags"] = self.driver.ParseFlags(data[1])
                            fetch_results.append(fr)
                        else:
                            # error retrieving the message body
                            raise Exception("IMAP error retrieving the body: %s" % data)
            else:
                raise Exception("IMAP search error: %s" % search_result[1])
    elif isinstance(query, (Expression, basestring)):
        raise NotImplementedError()
    else:
        raise TypeError("Unexpected query type")

    imapqry_dict = {}
    imapfields_dict = {}

    # no explicit field list (or SQLALL) means "all searchable fields"
    if len(fields) == 1 and isinstance(fields[0], SQLALL):
        allfields = True
    elif len(fields) == 0:
        allfields = True
    else:
        allfields = False
    if allfields:
        colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
    else:
        colnames = ["%s.%s" % (tablename, field.name) for field in fields]

    for k in colnames:
        imapfields_dict[k] = k

    imapqry_list = list()
    imapqry_array = list()
    for fr in fetch_results:
        attachments = []
        content = []
        size = 0
        n = int(fr["message"])
        item_dict = dict()
        message = fr["email"]
        uid = fr["uid"]
        charset = self.get_charset(message)
        flags = fr["flags"]
        raw_message = fr["raw_message"]
        # Return messages data mapping static fields
        # and fetched results. Mapping should be made
        # outside the select function (with auxiliary
        # instance methods)

        # pending: search flags states trough the email message
        # instances for correct output

        # preserve subject encoding (ASCII/quoted printable)

        if "%s.id" % tablename in colnames:
            item_dict["%s.id" % tablename] = n
        if "%s.created" % tablename in colnames:
            item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
        if "%s.uid" % tablename in colnames:
            item_dict["%s.uid" % tablename] = uid
        if "%s.sender" % tablename in colnames:
            # If there is no encoding found in the message header
            # force utf-8 replacing characters (change this to
            # module's defaults). Applies to .sender, .to, .cc
            # and .bcc fields
            item_dict["%s.sender" % tablename] = message["From"]
        if "%s.to" % tablename in colnames:
            item_dict["%s.to" % tablename] = message["To"]
        if "%s.cc" % tablename in colnames:
            if "Cc" in message.keys():
                item_dict["%s.cc" % tablename] = message["Cc"]
            else:
                item_dict["%s.cc" % tablename] = ""
        if "%s.bcc" % tablename in colnames:
            if "Bcc" in message.keys():
                item_dict["%s.bcc" % tablename] = message["Bcc"]
            else:
                item_dict["%s.bcc" % tablename] = ""
        if "%s.deleted" % tablename in colnames:
            item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
        if "%s.draft" % tablename in colnames:
            item_dict["%s.draft" % tablename] = "\\Draft" in flags
        if "%s.flagged" % tablename in colnames:
            item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
        if "%s.recent" % tablename in colnames:
            item_dict["%s.recent" % tablename] = "\\Recent" in flags
        if "%s.seen" % tablename in colnames:
            item_dict["%s.seen" % tablename] = "\\Seen" in flags
        if "%s.subject" % tablename in colnames:
            item_dict["%s.subject" % tablename] = message["Subject"]
        if "%s.answered" % tablename in colnames:
            item_dict["%s.answered" % tablename] = "\\Answered" in flags
        if "%s.mime" % tablename in colnames:
            item_dict["%s.mime" % tablename] = message.get_content_type()
        if "%s.encoding" % tablename in colnames:
            item_dict["%s.encoding" % tablename] = charset

        # Here goes the whole RFC822 body as an email instance
        # for controller side custom processing
        # The message is stored as a raw string
        # >> email.message_from_string(raw string)
        # returns a Message object for enhanced object processing
        if "%s.email" % tablename in colnames:
            # WARNING: no encoding performed (raw message)
            item_dict["%s.email" % tablename] = raw_message

        # Size measure as suggested in a Velocity Reviews post
        # by Tim Williams: "how to get size of email attachment"
        # Note: len() and server RFC822.SIZE reports doesn't match
        # To retrieve the server size for representation would add a new
        # fetch transaction to the process
        for part in message.walk():
            maintype = part.get_content_maintype()
            if ("%s.attachments" % tablename in colnames) or \
               ("%s.content" % tablename in colnames):
                payload = part.get_payload(decode=True)
                if payload:
                    filename = part.get_filename()
                    values = {"mime": part.get_content_type()}
                    if ((filename or not "text" in maintype) and
                        ("%s.attachments" % tablename in colnames)):
                        values.update({"payload": payload,
                                       "filename": filename,
                                       "encoding": part.get_content_charset(),
                                       "disposition": part["Content-Disposition"]})
                        attachments.append(values)
                    elif (("text" in maintype) and
                          ("%s.content" % tablename in colnames)):
                        values.update({"text": self.encode_text(payload,
                                           self.get_charset(part))})
                        content.append(values)

            if "%s.size" % tablename in colnames:
                if part is not None:
                    size += len(str(part))
        item_dict["%s.content" % tablename] = content
        item_dict["%s.attachments" % tablename] = attachments
        item_dict["%s.size" % tablename] = size
        imapqry_list.append(item_dict)

    # extra object mapping for the sake of rows object
    # creation (sends an array or lists)
    for item_dict in imapqry_list:
        imapqry_array_item = list()
        for fieldname in colnames:
            imapqry_array_item.append(item_dict[fieldname])
        imapqry_array.append(imapqry_array_item)

    # parse result and return a rows object
    colnames = colnames
    processor = attributes.get('processor',self.parse)
    return processor(imapqry_array, fields, colnames)
6811
def insert(self, table, fields):
    """Append a new message to *table*'s mailbox.

    fields is a sequence of (Field, value) pairs. When no prebuilt
    "email" value is supplied, an RFC822 message is assembled from the
    individual field values (sender, subject, to/cc/bcc, content and
    attachments). Returns the DAL id of the stored message.
    Raises on server errors; empty inserts are not implemented.
    """
    def add_payload(message, obj):
        # attach one content/attachment dict to the outgoing message
        # NOTE: `Message` here is the name imported locally below; the
        # helper is only ever called after that import has run
        payload = Message()
        encoding = obj.get("encoding", "utf-8")
        if encoding and (encoding.upper() in
                         ("BASE64", "7BIT", "8BIT", "BINARY")):
            payload.add_header("Content-Transfer-Encoding", encoding)
        else:
            payload.set_charset(encoding)
        mime = obj.get("mime", None)
        if mime:
            payload.set_type(mime)
        if "text" in obj:
            payload.set_payload(obj["text"])
        elif "payload" in obj:
            payload.set_payload(obj["payload"])
        if "filename" in obj and obj["filename"]:
            payload.add_header("Content-Disposition",
                "attachment", filename=obj["filename"])
        message.attach(payload)

    mailbox = table.mailbox
    d = dict(((k.name, v) for k, v in fields))
    date_time = d.get("created") or datetime.datetime.now()
    struct_time = date_time.timetuple()
    if len(d) > 0:
        message = d.get("email", None)
        attachments = d.get("attachments", [])
        content = d.get("content", [])
        # collect requested system flags (\Answered, \Deleted, ...)
        flags = " ".join(["\\%s" % flag.capitalize() for flag in
                         ("answered", "deleted", "draft", "flagged",
                          "recent", "seen") if d.get(flag, False)])
        if not message:
            # no raw message given: build one from the field values
            from email.message import Message
            mime = d.get("mime", None)
            charset = d.get("encoding", None)
            message = Message()
            message["from"] = d.get("sender", "")
            message["subject"] = d.get("subject", "")
            message["date"] = self.convert_date(date_time, imf=True)

            if mime:
                message.set_type(mime)
            if charset:
                message.set_charset(charset)
            for item in ("to", "cc", "bcc"):
                value = d.get(item, "")
                if isinstance(value, basestring):
                    message[item] = value
                else:
                    # sequence of addresses
                    message[item] = ";".join([i for i in
                                              value])
            if (not message.is_multipart() and
               (not message.get_content_type().startswith(
                    "multipart"))):
                # single-part message: body is the first content item
                if isinstance(content, basestring):
                    message.set_payload(content)
                elif len(content) > 0:
                    message.set_payload(content[0]["text"])
            else:
                # multipart: attach every content and attachment part
                [add_payload(message, c) for c in content]
                [add_payload(message, a) for a in attachments]
            message = message.as_string()

        result, data = self.connection.append(mailbox, flags, struct_time, message)
        if result == "OK":
            # the server response embeds the new UID as its last number
            uid = int(re.findall("\d+", str(data))[-1])
            return self.db(table.uid==uid).select(table.id).first().id
        else:
            raise Exception("IMAP message append failed: %s" % data)
    else:
        raise NotImplementedError("IMAP empty insert is not implemented")
def update(self, tablename, query, fields):
    """Update flag fields of the messages matching *query*.

    Only writable flag fields are honored (the \\Recent flag is
    read-only and skipped). Returns the number of successful STORE
    commands issued, which may exceed the number of messages when
    both flags are set and unset on the same message.
    """
    # TODO: the adapter should implement an .expand method
    commands = list()
    rowcount = 0
    if use_common_filters(query):
        query = self.common_filter(query, [tablename,])
    mark = []
    unmark = []
    if query:
        # split the requested flag changes into set/unset lists
        for item in fields:
            field = item[0]
            name = field.name
            value = item[1]
            if self.is_flag(name):
                flag = self.search_fields[name]
                if (value is not None) and (flag != "\\Recent"):
                    if value:
                        mark.append(flag)
                    else:
                        unmark.append(flag)
        result, data = self.connection.select(
            self.connection.mailbox_names[tablename])
        string_query = "(%s)" % query
        result, data = self.connection.search(None, string_query)
        store_list = [item.strip() for item in data[0].split()
                      if item.strip().isdigit()]
        # build commands for marked flags
        for number in store_list:
            result = None
            if len(mark) > 0:
                commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
            if len(unmark) > 0:
                commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))

    for command in commands:
        result, data = self.connection.store(*command)
        if result == "OK":
            rowcount += 1
        else:
            raise Exception("IMAP storing error: %s" % data)
    return rowcount
6926
def count(self,query,distinct=None):
    """Return how many messages match *query* (distinct is ignored)."""
    total = 0
    tablename = self.get_query_mailbox(query)
    if query and tablename is not None:
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        self.connection.select(self.connection.mailbox_names[tablename])
        result, data = self.connection.search(None, "(%s)" % query)
        # the response is a whitespace-separated list of sequence numbers
        matches = [item.strip() for item in data[0].split()
                   if item.strip().isdigit()]
        total = len(matches)
    return total
6939
def delete(self, tablename, query):
    """Flag every message matching *query* as \\Deleted and expunge
    the mailbox; return the number of messages flagged."""
    removed = 0
    if query:
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        self.connection.select(self.connection.mailbox_names[tablename])
        result, data = self.connection.search(None, "(%s)" % query)
        numbers = [item.strip() for item in data[0].split()
                   if item.strip().isdigit()]
        for number in numbers:
            result, data = self.connection.store(
                number, "+FLAGS", "(\\Deleted)")
            if result != "OK":
                raise Exception("IMAP store error: %s" % data)
            removed += 1
        if removed > 0:
            # physically remove the flagged messages
            self.connection.expunge()
    return removed
6958
def BELONGS(self, first, second):
    """Translate a belongs() query to an IMAP sequence/UID set;
    non-numeric candidates are silently dropped."""
    target = self.search_fields[first.name]
    if target in ("MESSAGE", "UID"):
        digits = [str(item) for item in second if str(item).isdigit()]
        joined = ",".join(digits).strip()
        if target == "MESSAGE":
            return joined
        return "UID %s" % joined
    raise Exception("Operation not supported")
    # result = "(%s %s)" % (self.expand(first), self.expand(second))
6974
def CONTAINS(self, first, second, case_sensitive=False):
    """Translate contains() to an IMAP text search.
    case_sensitive is silently ignored (IMAP search is not)."""
    target = self.search_fields[first.name]
    if target in ("FROM", "TO", "SUBJECT", "TEXT"):
        return "%s \"%s\"" % (target, self.expand(second))
    if first.name in ("cc", "bcc"):
        return "%s \"%s\"" % (first.name.upper(), self.expand(second))
    if first.name == "mime":
        # content type lives in a header, not a searchable field
        return "HEADER Content-Type \"%s\"" % self.expand(second)
    raise Exception("Operation not supported")
6990
def GT(self, first, second):
    """Translate > into IMAP search syntax (ranges/SINCE/LARGER)."""
    target = self.search_fields[first.name]
    if target == "MESSAGE":
        last_message = self.get_last_message(first.tablename)
        return "%d:%d" % (int(self.expand(second)) + 1, last_message)
    if target == "UID":
        # GT and LT may not return expected sets depending on the
        # uid format implemented by the server
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            LOGGER.debug("Error requesting uid bounds: %s",
                         str(sys.exc_info()[1]))
            return ""
        try:
            lower_limit = int(self.expand(second)) + 1
        except (ValueError, TypeError):
            raise Exception("Operation not supported (non integer UID)")
        return "UID %s:%s" % (lower_limit, threshold)
    if target == "DATE":
        return "SINCE %s" % self.convert_date(second,
                                              add=datetime.timedelta(1))
    if target == "SIZE":
        return "LARGER %s" % self.expand(second)
    raise Exception("Operation not supported")
7020
def GE(self, first, second):
    """Translate >= into IMAP search syntax (ranges/SINCE)."""
    target = self.search_fields[first.name]
    if target == "MESSAGE":
        last_message = self.get_last_message(first.tablename)
        return "%s:%s" % (self.expand(second), last_message)
    if target == "UID":
        # GT and LT may not return expected sets depending on the
        # uid format implemented by the server
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            LOGGER.debug("Error requesting uid bounds: %s",
                         str(sys.exc_info()[1]))
            return ""
        return "UID %s:%s" % (self.expand(second), threshold)
    if target == "DATE":
        return "SINCE %s" % self.convert_date(second)
    raise Exception("Operation not supported")
7044
def LT(self, first, second):
    """Translate < into IMAP search syntax (ranges/BEFORE/SMALLER)."""
    target = self.search_fields[first.name]
    if target == "MESSAGE":
        return "%s:%s" % (1, int(self.expand(second)) - 1)
    if target == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            LOGGER.debug("Error requesting uid bounds: %s",
                         str(sys.exc_info()[1]))
            return ""
        try:
            upper_limit = int(self.expand(second)) - 1
        except (ValueError, TypeError):
            raise Exception("Operation not supported (non integer UID)")
        return "UID %s:%s" % (pedestal, upper_limit)
    if target == "DATE":
        return "BEFORE %s" % self.convert_date(second)
    if target == "SIZE":
        return "SMALLER %s" % self.expand(second)
    raise Exception("Operation not supported")
7070
def LE(self, first, second):
    """Translate <= into IMAP search syntax (ranges/BEFORE)."""
    target = self.search_fields[first.name]
    if target == "MESSAGE":
        return "%s:%s" % (1, self.expand(second))
    if target == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            LOGGER.debug("Error requesting uid bounds: %s",
                         str(sys.exc_info()[1]))
            return ""
        upper_limit = int(self.expand(second))
        return "UID %s:%s" % (pedestal, upper_limit)
    if target == "DATE":
        # BEFORE is exclusive, so shift one day to include `second`
        return "BEFORE %s" % self.convert_date(second,
                                               add=datetime.timedelta(1))
    raise Exception("Operation not supported")
7090
def NE(self, first, second=None):
    """Translate != ; NE(field) with no value on the id field is the
    special "all records" query."""
    if (second is None) and isinstance(first, Field):
        # All records special table query
        if first.type == "id":
            return self.GE(first, 1)
    negated = self.NOT(self.EQ(first, second))
    return negated.replace("NOT NOT", "").strip()
7099
def EQ(self,first,second):
    """Translate == into IMAP search terms (sequence, UID, ON, or a
    flag keyword)."""
    target = self.search_fields[first.name]
    if target is None:
        raise Exception("Operation not supported")
    if target == "MESSAGE":
        # query by message sequence number
        return "%s" % self.expand(second)
    if target == "UID":
        return "UID %s" % self.expand(second)
    if target == "DATE":
        return "ON %s" % self.convert_date(second)
    if target in self.flags.values():
        # boolean flag fields: strip the leading backslash and
        # negate with NOT when a falsy value is requested
        if second:
            return "%s" % (target.upper()[1:])
        return "NOT %s" % (target.upper()[1:])
    raise Exception("Operation not supported")
7122
def AND(self, first, second):
    """IMAP conjunction: search keys are simply juxtaposed."""
    return "%s %s" % (self.expand(first), self.expand(second))
7126
def OR(self, first, second):
    """IMAP disjunction; collapses an accidental nested 'OR OR'."""
    combined = "OR %s %s" % (self.expand(first), self.expand(second))
    return "%s" % combined.replace("OR OR", "OR")
7130
def NOT(self, first):
    """IMAP negation of a search key."""
    return "NOT %s" % self.expand(first)
########################################################################
# end of adapters
########################################################################

# Registry mapping connection-URI schemes (the part before '://',
# optionally with a ':driver' suffix) to the adapter class that serves
# them; the DAL constructor looks up the scheme of the uri here.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'spatialite': SpatiaLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'spatialite:memory': SpatiaLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'postgres:psycopg2': PostgreSQLAdapter,
    'postgres:pg8000': PostgreSQLAdapter,
    'postgres2:psycopg2': NewPostgreSQLAdapter,
    'postgres2:pg8000': NewPostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'mssql3': MSSQL3Adapter,
    'mssql4' : MSSQL4Adapter,
    'vertica': VerticaAdapter,
    'sybase': SybaseAdapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'informix-se': InformixSEAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    'imap': IMAPAdapter
}
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype

    Returns either a list of validators or a single (possibly wrapped)
    IS_IN_DB validator for reference fields. Also installs a default
    `represent` on reference/list fields when none is set.
    """
    db = field.db
    try:
        from gluon import validators
    except ImportError:
        # outside web2py there are no validators to attach
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    # helper: render a referenced record through the table's _format
    def ff(r,id):
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json)))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type == 'integer':
        requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))
    elif field_type == 'bigint':
        requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            # self-reference: allow empty to break the cycle
            if field.tablename == field_type[10:]:
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 values; batch and merge
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(f(r,x.id) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        if not field.notnull:
            requires = validators.IS_EMPTY_OR(requires)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return ', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # two-letter prefixes of types whose empty value is handled by
    # IS_EMPTY_OR instead of IS_NOT_EMPTY
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape '|' characters in *item* so it can be stored in a
    bar-delimited list (see bar_encode); doubling the pipe makes the
    encoding reversible."""
    text = str(item)
    return text.replace('|', '||')
7284
def bar_encode(items):
    """Serialize *items* into the '|'-delimited string format used for
    list: fields; blank items (whitespace-only) are dropped and each
    surviving item is escaped with bar_escape."""
    escaped = [bar_escape(item) for item in items if str(item).strip()]
    return '|%s|' % '|'.join(escaped)
7287
def bar_decode_integer(value):
    """Decode a bar-encoded string of integer ids into a list of longs.

    File-like inputs (anything with .read but no .split, e.g. a blob
    wrapper) are read into a string first.
    """
    if not hasattr(value, 'split') and hasattr(value, 'read'):
        value = value.read()
    chunks = value.split('|')
    return [long(chunk) for chunk in chunks if chunk.strip()]
7292
def bar_decode_string(value):
    """Decode a bar-encoded string of strings back into a list,
    un-escaping the doubled pipes produced by bar_escape."""
    inner = value[1:-1]  # strip the leading and trailing '|'
    pieces = REGEX_UNPACK.split(inner)
    return [piece.replace('||', '|') for piece in pieces if piece.strip()]
7296
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    # all state lives in the instance __dict__, so construction is just
    # a dict-style update
    __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs)

    def __getitem__(self, k):
        # Lookup order: Table/Field objects resolved by name, then the
        # '_extra' columns dict, then a plain attribute, then a
        # 'table.field' dotted key, and finally a lazy reference.
        if isinstance(k, Table):
            try:
                return ogetattr(self, k._tablename)
            except (KeyError,AttributeError,TypeError):
                pass
        elif isinstance(k, Field):
            try:
                return ogetattr(self, k.name)
            except (KeyError,AttributeError,TypeError):
                pass
            try:
                return ogetattr(ogetattr(self, k.tablename), k.name)
            except (KeyError,AttributeError,TypeError):
                pass

        key=str(k)
        # expressions selected alongside fields are stored under '_extra'
        _extra = ogetattr(self, '__dict__').get('_extra', None)
        if _extra is not None:
            v = _extra.get(key, DEFAULT)
            if v != DEFAULT:
                return v
        try:
            return ogetattr(self, key)
        except (KeyError,AttributeError,TypeError):
            pass

        # 'tablename.fieldname' style keys from joined selects
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if m:
            try:
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                key = m.group(2)
        try:
            return ogetattr(self, key)
        except (KeyError,AttributeError,TypeError), ae:
            # last chance: resolve a lazy reference; if that also fails,
            # re-raise the original error, not the secondary one
            try:
                self[key] = ogetattr(self,'__get_lazy_reference__')(key)
                return self[key]
            except:
                raise ae

    __setitem__ = lambda self, key, value: setattr(self, str(key), value)

    __delitem__ = object.__delattr__

    __copy__ = lambda self: Row(self)

    __call__ = __getitem__

    def get(self, key, default=None):
        # dict-like get() built on the rich __getitem__ lookup above;
        # falls back to a plain __dict__ lookup before returning default
        try:
            return self.__getitem__(key)
        except(KeyError, AttributeError, TypeError):
            return self.__dict__.get(key,default)

    has_key = __contains__ = lambda self, key: key in self.__dict__

    __nonzero__ = lambda self: len(self.__dict__)>0

    update = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs)

    keys = lambda self: self.__dict__.keys()

    items = lambda self: self.__dict__.items()

    values = lambda self: self.__dict__.values()

    __iter__ = lambda self: self.__dict__.__iter__()

    iteritems = lambda self: self.__dict__.iteritems()

    __str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict()

    # int()/long() of a Row yield its id (raises if there is no id field)
    __int__ = lambda self: object.__getattribute__(self,'id')

    __long__ = lambda self: long(object.__getattribute__(self,'id'))

    __getattr__ = __getitem__

    # def __getattribute__(self, key):
    #     try:
    #         return object.__getattribute__(self, key)
    #     except AttributeError, ae:
    #         try:
    #             return self.__get_lazy_reference__(key)
    #         except:
    #             raise ae

    def __eq__(self,other):
        # equality is by dict representation; anything without as_dict()
        # compares unequal
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        """
        Returns a plain dict copy of the Row: nested Rows become dicts,
        References become longs, Decimals become floats, datetimes are
        optionally converted to 'YYYY-MM-DD HH:MM:SS' strings, and values
        of any other non-serializable type are dropped.
        """
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types,(list,tuple,set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        # iterate over a copy of the keys because entries may be deleted
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=long(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        """
        Serializes the Row to an XML fragment rooted at *row_name*;
        nested Rows become nested elements, callables are skipped, and
        fields with non-alphanumeric names are emitted as <extra> tags.
        """
        def f(row,field,indent='  '):
            if isinstance(row,Row):
                spc = indent+'  \n'
                items = [f(row[x],x,indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent,field,row,field)
                else:
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent,field,row)
            else:
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the row to a JSON object
        kwargs are passed to .as_dict method
        only "object" mode supported

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order

        mode and colnames are not implemented
        """

        item = self.as_dict(**kwargs)
        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
7479
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    """A list (of table names) that can also be called like a method.

    Calling an instance returns a shallow copy, so ``db.tables()`` yields
    a list callers may mutate without affecting ``db.tables`` itself.
    """
    __call__ = lambda self: copy.copy(self)
7489
def smart_query(fields,text):
    """
    Parses a free-text search expression (e.g. "name contains John and
    id > 3") against *fields* (a Field, a Table, or a list of them) and
    returns the corresponding Query object.
    Raises RuntimeError on unknown fields, bad syntax, or unsupported
    operations.
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    # flatten Tables into their Fields
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both 'name' and 'table.name' (lowercased) to the Field;
    # first match wins on duplicates
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # pull quoted string constants out of the text and replace them with
    # '#<i>' placeholders so the rewrites below cannot touch them
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize natural-language and symbolic operators to canonical tokens
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            # also handle forms like ' is equal to '
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # glue back two-character comparison operators split by whitespace
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # simple state machine over tokens: expect field, then operator,
    # then value; 'not'/'and'/'or' adjust the pending negation/logic
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                # unquoted '=' on text-ish fields is treated as 'like'
                if field.type in ('text', 'string', 'json'):
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            # reset state for the next (field, op, value) triple
            field = op = neg = logic = None
    return query
7607
7608 -class DAL(object):
7609 7610 """ 7611 an instance of this class represents a database connection 7612 7613 Example:: 7614 7615 db = DAL('sqlite://test.db') 7616 7617 or 7618 7619 db = DAL(**{"uri": ..., "tables": [...]...}) # experimental 7620 7621 db.define_table('tablename', Field('fieldname1'), 7622 Field('fieldname2')) 7623 """ 7624
    def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
        """
        Returns a (possibly shared) DAL instance for the current thread.

        Instances are grouped in THREAD_LOCAL.db_instances keyed by a
        db_uid (an md5 of repr(uri) unless passed explicitly).  The
        special uri '<zombie>' retrieves an existing instance by db_uid
        without opening a new connection.
        """
        if not hasattr(THREAD_LOCAL,'db_instances'):
            THREAD_LOCAL.db_instances = {}
        if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
            THREAD_LOCAL.db_instances_zombie = {}
        if uri == '<zombie>':
            db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL.db_instances:
                db_group = THREAD_LOCAL.db_instances[db_uid]
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL.db_instances_zombie:
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL.db_instances_zombie[db_uid] = db
        else:
            db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL.db_instances_zombie:
                # promote an earlier zombie for this uid to a real instance
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
                del THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
            db_group.append(db)
            THREAD_LOCAL.db_instances[db_uid] = db_group
        db._db_uid = db_uid
        return db
    @staticmethod
    def set_folder(folder):
        """
        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
        """
        # delegates to the adapter base class, which owns the folder state
        BaseAdapter.set_folder(folder)
    @staticmethod
    def get_instances():
        """
        Returns a dictionary with uri as key with timings and defined tables
        {'sqlite://storage.sqlite': {
            'dbstats': [(select auth_user.email from auth_user, 0.02009)],
            'dbtables': {
                'defined': ['auth_cas', 'auth_event', 'auth_group',
                    'auth_membership', 'auth_permission', 'auth_user'],
                'lazy': '[]'
                }
            }
        }
        """
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        infos = {}
        for db_uid, db_group in dbs:
            for db in db_group:
                # skip uri-less (dummy) instances
                if not db._uri:
                    continue
                # key by the uri with credentials masked
                k = hide_password(db._adapter.uri)
                infos[k] = dict(
                    dbstats = [(row[0], row[1]) for row in db._timings],
                    dbtables = {'defined': sorted(
                            list(set(db.tables)-set(db._LAZY_TABLES.keys()))),
                                'lazy': sorted(db._LAZY_TABLES.keys())})
        return infos
7688 7689 @staticmethod
7690 - def distributed_transaction_begin(*instances):
7691 if not instances: 7692 return 7693 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7694 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7695 instances = enumerate(instances) 7696 for (i, db) in instances: 7697 if not db._adapter.support_distributed_transaction(): 7698 raise SyntaxError( 7699 'distributed transaction not suported by %s' % db._dbname) 7700 for (i, db) in instances: 7701 db._adapter.distributed_transaction_begin(keys[i])
7702 7703 @staticmethod
7704 - def distributed_transaction_commit(*instances):
7705 if not instances: 7706 return 7707 instances = enumerate(instances) 7708 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7709 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7710 for (i, db) in instances: 7711 if not db._adapter.support_distributed_transaction(): 7712 raise SyntaxError( 7713 'distributed transaction not suported by %s' % db._dbanme) 7714 try: 7715 for (i, db) in instances: 7716 db._adapter.prepare(keys[i]) 7717 except: 7718 for (i, db) in instances: 7719 db._adapter.rollback_prepared(keys[i]) 7720 raise RuntimeError('failure to commit distributed transaction') 7721 else: 7722 for (i, db) in instances: 7723 db._adapter.commit_prepared(keys[i]) 7724 return
7725
    def __init__(self, uri=DEFAULT_URI,
                 pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_all=False,
                 decode_credentials=False, driver_args=None,
                 adapter_args=None, attempts=5, auto_import=False,
                 bigint_id=False, debug=False, lazy_tables=False,
                 db_uid=None, do_connect=True,
                 after_connection=None, tables=None, ignore_field_case=True,
                 entity_quoting=False):
        """
        Creates a new Database Abstraction Layer instance.

        Keyword arguments:

        :uri: string that contains information for connecting to a database.
               (default: 'sqlite://dummy.db')

                experimental: you can specify a dictionary as uri
                parameter i.e. with
                db = DAL({"uri": "sqlite://storage.sqlite",
                          "tables": {...}, ...})

                for an example of dict input you can check the output
                of the scaffolding db model with

                db.as_dict()

                Note that for compatibility with Python older than
                version 2.6.5 you should cast your dict input keys
                to str due to a syntax limitation on kwarg names.
                for proper DAL dictionary input you can use one of:

                obj = serializers.cast_keys(dict, [encoding="utf-8"])

                or else (for parsing json input)

                obj = serializers.loads_json(data, unicode_keys=False)

        :pool_size: How many open connections to make to the database object.
        :folder: where .table files will be created.
                 automatically set within web2py
                 use an explicit path when using DAL outside web2py
        :db_codec: string encoding of the database (default: 'UTF-8')
        :check_reserved: list of adapters to check tablenames and column names
                         against sql/nosql reserved keywords. (Default None)

        * 'common' List of sql keywords that are common to all database types
                such as "SELECT, INSERT". (recommended)
        * 'all' Checks against all known SQL keywords. (not recommended)
                <adaptername> Checks against the specific adapters list of keywords
                (recommended)
        * '<adaptername>_nonreserved' Checks against the specific adapters
                list of nonreserved keywords. (if available)
        :migrate (defaults to True) sets default migrate behavior for all tables
        :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
        :migrate_enabled (defaults to True). If set to False disables ALL migrations
        :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables
        :attempts (defaults to 5). Number of times to attempt connecting
        :auto_import (defaults to False). If set, import automatically table definitions from the
                 databases folder
        :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields
        :lazy_tables (defaults to False): delay table definition until table access
        :after_connection (defaults to None): a callable that will be execute after the connection
        """
        # a zombie instance created by __new__ is already initialized
        if uri == '<zombie>' and db_uid is not None: return
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: urllib.unquote(cred)
        self._folder = folder
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []
        self._pending_references = {}
        self._request_tenant = 'request_tenant'
        self._common_fields = []
        self._referee_name = '%(table)s'
        self._bigint_id = bigint_id
        self._debug = debug
        self._migrated = []
        self._LAZY_TABLES = {}
        self._lazy_tables = lazy_tables
        self._tables = SQLCallableList()
        self._driver_args = driver_args
        self._adapter_args = adapter_args
        self._check_reserved = check_reserved
        self._decode_credentials = decode_credentials
        self._attempts = attempts
        self._do_connect = do_connect
        self._ignore_field_case = ignore_field_case

        # sanitize an invalid attempts value
        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # a tuple/list of uris is tried in order, `attempts` times each
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        self._dbname = REGEX_DBNAME.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        kwargs = dict(db=self,uri=uri,
                                      pool_size=pool_size,
                                      folder=folder,
                                      db_codec=db_codec,
                                      credential_decoder=credential_decoder,
                                      driver_args=driver_args or {},
                                      adapter_args=adapter_args or {},
                                      do_connect=do_connect,
                                      after_connection=after_connection,
                                      entity_quoting=entity_quoting)
                        self._adapter = ADAPTERS[self._dbname](**kwargs)
                        types = ADAPTERS[self._dbname].types
                        # copy so multiple DAL() possible
                        self._adapter.types = copy.copy(types)
                        self._adapter.build_parsemap()
                        self._adapter.ignore_field_case = ignore_field_case
                        if bigint_id:
                            if 'big-id' in types and 'reference' in types:
                                self._adapter.types['id'] = types['big-id']
                                self._adapter.types['reference'] = types['big-reference']
                        connected = True
                        break
                    except SyntaxError:
                        # a bad uri/adapter is not retried
                        raise
                    except Exception:
                        tb = traceback.format_exc()
                        LOGGER.debug('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
                if connected:
                    break
                else:
                    # wait before the next round of attempts
                    time.sleep(1)
            if not connected:
                raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
        else:
            # no uri: dummy adapter, migrations make no sense
            self._adapter = BaseAdapter(db=self,pool_size=0,
                                        uri='None',folder=folder,
                                        db_codec=db_codec, after_connection=after_connection,
                                        entity_quoting=entity_quoting)
            migrate = fake_migrate = False
        adapter = self._adapter
        self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if auto_import or tables:
            self.import_table_definitions(adapter.folder,
                                          tables=tables)
    @property
    def tables(self):
        # list of defined table names; it is a SQLCallableList, so both
        # db.tables and db.tables() work
        return self._tables
7895
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, tables=None):
        """
        Defines tables either from an explicit *tables* list of
        define_table kwargs dicts, or by loading the pickled
        '<uri_hash>_*.table' migration files found under *path*.
        """
        if tables:
            for table in tables:
                self.define_table(**table)
        else:
            pattern = pjoin(path,self._uri_hash+'_*.table')
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    sql_fields = pickle.load(tfile)
                    # table name is the part of the filename matched by '*'
                    name = filename[len(pattern)-7:-6]
                    # rebuild Fields, keeping the stored 'sortable' order
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                          for key, value in sql_fields.iteritems()]
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7921
7922 - def check_reserved_keyword(self, name):
7923 """ 7924 Validates ``name`` against SQL keywords 7925 Uses self.check_reserve which is a list of 7926 operators to use. 7927 self.check_reserved 7928 ['common', 'postgres', 'mysql'] 7929 self.check_reserved 7930 ['all'] 7931 """ 7932 for backend in self.check_reserved: 7933 if name.upper() in self.RSK[backend]: 7934 raise SyntaxError( 7935 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7936
    def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
        """
        Matches the request *args*/*vars* against a list of URL *patterns*
        and returns a Row with keys: status, pattern, error, response
        (and count when a full record set is returned).

        EXAMPLE:

        db.define_table('person',Field('name'),Field('info'))
        db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

        @request.restful()
        def index():
            def GET(*args,**vars):
                patterns = [
                    "/friends[person]",
                    "/{person.name}/:field",
                    "/{person.name}/pets[pet.ownedby]",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                    ("/dogs[pet]", db.pet.info=='dog'),
                    ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                    ]
                parser = db.parse_as_rest(patterns,args,vars)
                if parser.status == 200:
                    return dict(content=parser.response)
                else:
                    raise HTTP(parser.status,parser.error)

            def POST(table_name,**vars):
                if table_name == 'person':
                    return db.person.validate_and_insert(**vars)
                elif table_name == 'pet':
                    return db.pet.validate_and_insert(**vars)
                else:
                    raise HTTP(400)
            return locals()
        """

        db = self
        re1 = REGEX_SEARCH_PATTERN
        re2 = REGEX_SQUARE_BRACKETS

        def auto_table(table,base='',depth=0):
            # generates patterns for all readable fields of *table*,
            # recursing into referencing tables up to *depth* levels
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base,field.replace('_','-'))
                else:
                    tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
                f = db[table][field]
                if not f.readable: continue
                if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('float','double','integer','bigint'):
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('date','datetime'):
                    # date parts are matched cumulatively: year, then
                    # year/month, then year/month/day
                    tag+= '/{%s.%s.year}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.month}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.day}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if f.type in ('datetime','time'):
                    tag+= '/{%s.%s.hour}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.minute}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.second}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if depth>0:
                    # NOTE(review): tag keeps accumulating across the
                    # referencing tables here — appears intentional upstream
                    for f in db[table]._referenced_by:
                        tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                        patterns.append(tag)
                        patterns += auto_table(table,base=tag,depth=depth-1)
            return patterns

        if patterns == 'auto':
            patterns=[]
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table,table))
                    patterns += auto_table(table,base='',depth=1)
        else:
            # expand any ':auto[table]' terminal tokens in place
            i = 0
            while i<len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern,str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i]+new_patterns+patterns[i+1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            # introspection endpoint: list all available patterns
            return Row({'status':200,'pattern':'list',
                        'error':None,'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            # a pattern may be (pattern, basequery[, exposedfields])
            if isinstance(pattern,tuple):
                if len(pattern)==2:
                    pattern, basequery = pattern
                elif len(pattern)>2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable=table=None
            if not isinstance(queries,dict):
                dbset=db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i=0
            tags = pattern[1:].split('/')
            if len(tags)!=len(args):
                continue
            for tag in tags:
                if re1.match(tag):
                    # '{table.field.op}' tag: narrow dbset by a filter
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens)==2 or tokens[2]=='eq':
                            query = db[table][field]==args[i]
                        elif tokens[2]=='ne':
                            query = db[table][field]!=args[i]
                        elif tokens[2]=='lt':
                            query = db[table][field]<args[i]
                        elif tokens[2]=='gt':
                            query = db[table][field]>args[i]
                        elif tokens[2]=='ge':
                            query = db[table][field]>=args[i]
                        elif tokens[2]=='le':
                            query = db[table][field]<=args[i]
                        elif tokens[2]=='year':
                            query = db[table][field].year()==args[i]
                        elif tokens[2]=='month':
                            query = db[table][field].month()==args[i]
                        elif tokens[2]=='day':
                            query = db[table][field].day()==args[i]
                        elif tokens[2]=='hour':
                            query = db[table][field].hour()==args[i]
                        elif tokens[2]=='minute':
                            query = db[table][field].minutes()==args[i]
                        elif tokens[2]=='second':
                            query = db[table][field].seconds()==args[i]
                        elif tokens[2]=='startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2]=='contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if len(tokens)==4 and tokens[3]=='not':
                            query = ~query
                        elif len(tokens)>=4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset=dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                    # 'name[table.field]' tag: follow a reference
                    ref = tag[tag.find('[')+1:-1]
                    if '.' in ref and otable:
                        table,field = ref.split('.')
                        selfld = '_id'
                        if db[table][field].type.startswith('reference '):
                            refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                        else:
                            refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            try:
                                dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400,'pattern':pattern,
                                            'error':'invalid path','response':None})
                        else:
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset=db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                        dbset=dbset(db[table])
                elif tag==':field' and table:
                    # ':field' tag: return the values of one column
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418,'pattern':pattern,
                                    'error':'I\'m a teapot','response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = long(vars.get('offset',None) or 0)
                        limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200,'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404,'pattern':pattern,
                                    'error':'no record found','response':None})
                elif tag != args[i]:
                    # literal tag mismatch: try the next pattern
                    break
                otable = table
                i += 1
                if i == len(tags) and table:
                    # whole pattern matched: select records
                    if hasattr(db[table], '_id'):
                        ofields = vars.get('order', db[table]._id.name).split('|')
                    else:
                        ofields = vars.get('order', db[table]._primarykey[0]).split('|')
                    try:
                        orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                    except (KeyError, AttributeError):
                        return Row({'status':400,'error':'invalid orderby','response':None})
                    if exposedfields:
                        fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                    else:
                        fields = [field for field in db[table] if field.readable]
                    count = dbset.count()
                    try:
                        offset = long(vars.get('offset',None) or 0)
                        limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    #if count > limits[1]-limits[0]:
                    #    return Row({'status':400,'error':'too many records','response':None})
                    try:
                        response = dbset.select(limitby=limits,orderby=orderby,*fields)
                    except ValueError:
                        return Row({'status':400,'pattern':pattern,
                                    'error':'invalid path','response':None})
                    return Row({'status':200,'response':response,
                                'pattern':pattern,'count':count})
        return Row({'status':400,'error':'no matching pattern','response':None})
8193
    def define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Defines (or registers lazily) a table named *tablename* with the
        given Fields; returns the Table, or None when lazy_tables is on
        and the table has not been accessed yet.
        Raises SyntaxError for invalid/duplicate names or invalid kwargs.
        """
        if not fields and 'fields' in args:
            fields = args.get('fields',())
        if not isinstance(tablename, str):
            if isinstance(tablename, unicode):
                try:
                    tablename = str(tablename)
                except UnicodeEncodeError:
                    raise SyntaxError("invalid unicode table name")
            else:
                raise SyntaxError("missing table name")
        elif hasattr(self,tablename) or tablename in self.tables:
            # redefinition is only allowed with redefine=True
            if not args.get('redefine',False):
                raise SyntaxError('table already defined: %s' % tablename)
        elif tablename.startswith('_') or hasattr(self,tablename) or \
                REGEX_PYTHON_KEYWORDS.match(tablename):
            # NOTE(review): hasattr(self,tablename) here is unreachable —
            # the previous elif already caught that case
            raise SyntaxError('invalid table name: %s' % tablename)
        elif self.check_reserved:
            self.check_reserved_keyword(tablename)
        else:
            # NOTE(review): kwargs are only validated when none of the
            # branches above matched (upstream behavior)
            invalid_args = set(args)-TABLE_ARGS
            if invalid_args:
                raise SyntaxError('invalid table "%s" attributes: %s' \
                    % (tablename,invalid_args))
        if self._lazy_tables and not tablename in self._LAZY_TABLES:
            # defer actual definition until first access
            self._LAZY_TABLES[tablename] = (tablename,fields,args)
            table = None
        else:
            table = self.lazy_define_table(tablename,*fields,**args)
        if not tablename in self.tables:
            self.tables.append(tablename)
        return table
8231
    def lazy_define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """Actually build the Table object (called directly by define_table
        or lazily from DAL.__getattr__ when lazy tables are enabled).

        Creates the Table, wires up references and default validators, and
        runs the migration machinery when applicable.
        """
        args_get = args.get
        common_fields = self._common_fields
        if common_fields:
            # fields shared by every table on this DAL
            fields = list(fields) + list(common_fields)

        table_class = args_get('table_class',Table)
        table = table_class(self, tablename, *fields, **args)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires == DEFAULT:
                # assign default validators derived from the field type
                field.requires = sqlhtml_validators(field)

        migrate = self._migrate_enabled and args_get('migrate',self._migrate)
        # NOTE(review): 'and' binds tighter than 'or', so the datastore
        # engine always takes the create_table path regardless of the
        # migrate flags — presumably intentional; confirm
        if migrate and not self._uri in (None,'None') \
                or self._adapter.dbengine=='google:datastore':
            fake_migrate = self._fake_migrate_all or \
                args_get('fake_migrate',self._fake_migrate)
            polymodel = args_get('polymodel',None)
            try:
                # serialize DDL across threads
                GLOBAL_LOCKER.acquire()
                self._lastsql = self._adapter.create_table(
                    table,migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel)
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = args_get('on_define',None)
        if on_define: on_define(table)
        return table
8272
8273 - def as_dict(self, flat=False, sanitize=True):
8274 db_uid = uri = None 8275 if not sanitize: 8276 uri, db_uid = (self._uri, self._db_uid) 8277 db_as_dict = dict(tables=[], uri=uri, db_uid=db_uid, 8278 **dict([(k, getattr(self, "_" + k, None)) 8279 for k in 'pool_size','folder','db_codec', 8280 'check_reserved','migrate','fake_migrate', 8281 'migrate_enabled','fake_migrate_all', 8282 'decode_credentials','driver_args', 8283 'adapter_args', 'attempts', 8284 'bigint_id','debug','lazy_tables', 8285 'do_connect'])) 8286 for table in self: 8287 db_as_dict["tables"].append(table.as_dict(flat=flat, 8288 sanitize=sanitize)) 8289 return db_as_dict
8290
8291 - def as_xml(self, sanitize=True):
8292 if not have_serializers: 8293 raise ImportError("No xml serializers available") 8294 d = self.as_dict(flat=True, sanitize=sanitize) 8295 return serializers.xml(d)
8296
8297 - def as_json(self, sanitize=True):
8298 if not have_serializers: 8299 raise ImportError("No json serializers available") 8300 d = self.as_dict(flat=True, sanitize=sanitize) 8301 return serializers.json(d)
8302
8303 - def as_yaml(self, sanitize=True):
8304 if not have_serializers: 8305 raise ImportError("No YAML serializers available") 8306 d = self.as_dict(flat=True, sanitize=sanitize) 8307 return serializers.yaml(d)
8308
8309 - def __contains__(self, tablename):
8310 try: 8311 return tablename in self.tables 8312 except AttributeError: 8313 # The instance has no .tables attribute yet 8314 return False
8315 8316 has_key = __contains__ 8317
8318 - def get(self,key,default=None):
8319 return self.__dict__.get(key,default)
8320
8321 - def __iter__(self):
8322 for tablename in self.tables: 8323 yield self[tablename]
8324
8325 - def __getitem__(self, key):
8326 return self.__getattr__(str(key))
8327
8328 - def __getattr__(self, key):
8329 if ogetattr(self,'_lazy_tables') and \ 8330 key in ogetattr(self,'_LAZY_TABLES'): 8331 tablename, fields, args = self._LAZY_TABLES.pop(key) 8332 return self.lazy_define_table(tablename,*fields,**args) 8333 return ogetattr(self, key)
8334
8335 - def __setitem__(self, key, value):
8336 osetattr(self, str(key), value)
8337
8338 - def __setattr__(self, key, value):
8339 if key[:1]!='_' and key in self: 8340 raise SyntaxError( 8341 'Object %s exists and cannot be redefined' % key) 8342 osetattr(self,key,value)
8343 8344 __delitem__ = object.__delattr__ 8345
8346 - def __repr__(self):
8347 if hasattr(self,'_uri'): 8348 return '<DAL uri="%s">' % hide_password(self._adapter.uri) 8349 else: 8350 return '<DAL db_uid="%s">' % self._db_uid
8351
8352 - def smart_query(self,fields,text):
8353 return Set(self, smart_query(fields,text))
8354
8355 - def __call__(self, query=None, ignore_common_filters=None):
8356 if isinstance(query,Table): 8357 query = self._adapter.id_query(query) 8358 elif isinstance(query,Field): 8359 query = query!=None 8360 elif isinstance(query, dict): 8361 icf = query.get("ignore_common_filters") 8362 if icf: ignore_common_filters = icf 8363 return Set(self, query, ignore_common_filters=ignore_common_filters)
8364
8365 - def commit(self):
8366 self._adapter.commit()
8367
8368 - def rollback(self):
8369 self._adapter.rollback()
8370
8371 - def close(self):
8372 self._adapter.close() 8373 if self._db_uid in THREAD_LOCAL.db_instances: 8374 db_group = THREAD_LOCAL.db_instances[self._db_uid] 8375 db_group.remove(self) 8376 if not db_group: 8377 del THREAD_LOCAL.db_instances[self._db_uid]
8378
    def executesql(self, query, placeholders=None, as_dict=False,
                   fields=None, colnames=None, as_ordered_dict=False):
        """
        Execute an arbitrary SQL statement through the adapter.

        placeholders is optional and will always be None.
        If using raw SQL with placeholders, placeholders may be
        a sequence of values to be substituted in
        or, (if supported by the DB driver), a dictionary with keys
        matching named placeholders in your SQL.

        Added 2009-12-05 "as_dict" optional argument. Will always be
        None when using DAL. If using raw SQL can be set to True and
        the results cursor returned by the DB driver will be converted
        to a sequence of dictionaries keyed with the db field
        names. Tested with SQLite but should work with any database
        since the cursor.description used to get field names is part
        of the Python dbi 2.0 specs. Results returned with
        as_dict=True are the same as those returned when applying
        .to_list() to a DAL query. If "as_ordered_dict"=True the
        behaviour is the same as when "as_dict"=True with the keys
        (field names) guaranteed to be in the same order as returned
        by the select name executed on the database.

        [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}]

        Added 2012-08-24 "fields" and "colnames" optional arguments. If either
        is provided, the results cursor returned by the DB driver will be
        converted to a DAL Rows object using the db._adapter.parse() method.

        The "fields" argument is a list of DAL Field objects that match the
        fields returned from the DB. The Field objects should be part of one or
        more Table objects defined on the DAL object. The "fields" list can
        include one or more DAL Table objects in addition to or instead of
        including Field objects, or it can be just a single table (not in a
        list). In that case, the Field objects will be extracted from the
        table(s).

        Instead of specifying the "fields" argument, the "colnames" argument
        can be specified as a list of field names in tablename.fieldname format.
        Again, these should represent tables and fields defined on the DAL
        object.

        It is also possible to specify both "fields" and the associated
        "colnames". In that case, "fields" can also include DAL Expression
        objects in addition to Field objects. For Field objects in "fields",
        the associated "colnames" must still be in tablename.fieldname format.
        For Expression objects in "fields", the associated "colnames" can
        be any arbitrary labels.

        Note, the DAL Table objects referred to by "fields" or "colnames" can
        be dummy tables and do not have to represent any real tables in the
        database. Also, note that the "fields" and "colnames" must be in the
        same order as the fields in the results cursor returned from the DB.
        """
        adapter = self._adapter
        if placeholders:
            adapter.execute(query, placeholders)
        else:
            adapter.execute(query)
        if as_dict or as_ordered_dict:
            if not hasattr(adapter.cursor,'description'):
                raise RuntimeError("database does not support executesql(...,as_dict=True)")
            # Non-DAL legacy db query, converts cursor results to dict.
            # cursor.description is a sequence of 7-item sequences, one per
            # column; the first item is always the field name according to
            # the Python Database API specs
            columns = adapter.cursor.description
            # reduce the column info down to just the field names
            fields = colnames or [f[0] for f in columns]
            if len(fields) != len(set(fields)):
                raise RuntimeError("Result set includes duplicate column names. Specify unique column names using the 'colnames' argument")

            # will hold our finished resultset in a list
            data = adapter._fetchall()
            # convert the list for each row into a dictionary so it's
            # easier to work with. row['field_name'] rather than row[0]
            if as_ordered_dict:
                _dict = OrderedDict
            else:
                _dict = dict
            return [_dict(zip(fields,row)) for row in data]
        try:
            data = adapter._fetchall()
        except:
            # statements with no result set (DDL/DML) end up here
            return None
        if fields or colnames:
            # parse the raw rows into a DAL Rows object
            fields = [] if fields is None else fields
            if not isinstance(fields, list):
                fields = [fields]
            extracted_fields = []
            for field in fields:
                if isinstance(field, Table):
                    # a bare Table contributes all of its fields
                    extracted_fields.extend([f for f in field])
                else:
                    extracted_fields.append(field)
            if not colnames:
                colnames = ['%s.%s' % (f.tablename, f.name)
                            for f in extracted_fields]
            data = adapter.parse(
                data, fields=extracted_fields, colnames=colnames)
        return data
8479
8480 - def _remove_references_to(self, thistable):
8481 for table in self: 8482 table._referenced_by = [field for field in table._referenced_by 8483 if not field.table==thistable]
8484
8485 - def export_to_csv_file(self, ofile, *args, **kwargs):
8486 step = long(kwargs.get('max_fetch_rows,',500)) 8487 write_colnames = kwargs['write_colnames'] = \ 8488 kwargs.get("write_colnames", True) 8489 for table in self.tables: 8490 ofile.write('TABLE %s\r\n' % table) 8491 query = self._adapter.id_query(self[table]) 8492 nrows = self(query).count() 8493 kwargs['write_colnames'] = write_colnames 8494 for k in range(0,nrows,step): 8495 self(query).select(limitby=(k,k+step)).export_to_csv_file( 8496 ofile, *args, **kwargs) 8497 kwargs['write_colnames'] = False 8498 ofile.write('\r\n\r\n') 8499 ofile.write('END')
8500
    def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                             unique='uuid', map_tablenames=None,
                             ignore_missing_tables=False,
                             *args, **kwargs):
        """Import a multi-table CSV dump produced by export_to_csv_file.

        The file alternates 'TABLE name' headers and per-table CSV blocks,
        terminated by an 'END' line. map_tablenames renames tables on the
        fly (a mapping to None skips the table's rows).
        """
        #if id_map is None: id_map={}
        id_offset = {} # only used if id_map is None
        map_tablenames = map_tablenames or {}
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == 'END':
                return
            # NOTE(review): an unknown table name raises here, before the
            # map_tablenames/ignore_missing_tables branch below can see it
            # — confirm this is the intended precedence
            elif not line.startswith('TABLE ') or \
                    not line[6:] in self.tables:
                raise SyntaxError('invalid file format')
            else:
                tablename = line[6:]
                tablename = map_tablenames.get(tablename,tablename)
                if tablename is not None and tablename in self.tables:
                    self[tablename].import_from_csv_file(
                        ifile, id_map, null, unique, id_offset,
                        *args, **kwargs)
                elif tablename is None or ignore_missing_tables:
                    # skip all non-empty lines (this table's CSV block)
                    for line in ifile:
                        if not line.strip():
                            break
                else:
                    raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
8531
def DAL_unpickler(db_uid):
    """Recreate a zombie DAL bound to *db_uid* during unpickling."""
    return DAL('<zombie>', db_uid=db_uid)
8535
def DAL_pickler(db):
    """Pickle a DAL instance as its db_uid; rebuilt by DAL_unpickler."""
    return DAL_unpickler, (db._db_uid,)

copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """Render "table.f1, table.f2, ..." for every field of a table.

    Normally only used internally (from within gluon.sql) when a whole
    table is expanded in a select.
    """

    def __init__(self, table):
        self._table = table

    def __str__(self):
        names = [str(f) for f in self._table]
        return ', '.join(names)
# class Reference(int):
class Reference(long):
    """A record id (long subclass) that lazily loads the referenced row.

    Attribute or item access other than 'id' selects the record from
    self._table on first use and caches it in self._record.
    """

    def __allocate(self):
        # fetch and cache the referenced record; fail loudly on a broken id
        if not self._record:
            self._record = self._table[long(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))

    def __getattr__(self, key, default=None):
        if key == 'id':
            return long(self)
        if key in self._table:
            self.__allocate()
        if self._record:
            # .get() deals with the case of self.update_record()
            return self._record.get(key, default)
        else:
            return None

    def get(self, key, default=None):
        # BUGFIX: this used to call __getattr__(key, default) while
        # __getattr__ accepted only (self, key), so every call with a
        # default raised TypeError; __getattr__ now honors the default.
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        if key.startswith('_'):
            # private attributes bypass record allocation
            long.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self, key, value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    """Rebuild a Reference id from its marshalled integer payload."""
    return marshal.loads(data)
8598
def Reference_pickler(data):
    """Pickle a Reference as a marshalled long, with a struct-packed
    fallback when marshal cannot handle the value."""
    try:
        payload = marshal.dumps(long(data))
    except AttributeError:
        payload = 'i%s' % struct.pack('<i', long(data))
    return (Reference_unpickler, (payload,))

copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class MethodAdder(object):
    """Decorator factory that attaches functions to a Table as bound methods.

    Usage: @table.add_method.my_name, or @table.add_method() to reuse the
    decorated function's own name.
    """
    def __init__(self, table):
        self.table = table

    def __call__(self):
        return self.register()

    def __getattr__(self, method_name):
        return self.register(method_name)

    def register(self, method_name=None):
        def _decorated(func):
            import types
            target = self.table
            # py2 three-argument MethodType: bind func to this instance
            bound = types.MethodType(func, target, target.__class__)
            setattr(target, method_name or func.func_name, bound)
            return func
        return _decorated
8624
8625 -class Table(object):
8626 8627 """ 8628 an instance of this class represents a database table 8629 8630 Example:: 8631 8632 db = DAL(...) 8633 db.define_table('users', Field('name')) 8634 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8635 db.users.drop() 8636 """ 8637
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        # reject names that would shadow DAL attributes or break SQL
        if (not isinstance(tablename, str) or tablename[0] == '_'
            or hasattr(DAL, tablename) or '.' in tablename
            or REGEX_PYTHON_KEYWORDS.match(tablename)
            ):
            raise SyntaxError('Field: invalid table name: %s, '
                              'use rname for "funny" names' % tablename)
        self._ot = None
        self._rname = args.get('rname')
        self._sequence_name = (args.get('sequence_name') or
                               db and db._adapter.sequence_name(self._rname
                                                                or tablename))
        self._trigger_name = (args.get('trigger_name') or
                              db and db._adapter.trigger_name(tablename))
        self._common_filter = args.get('common_filter')
        self._format = args.get('format')
        self._singular = args.get(
            'singular', tablename.replace('_', ' ').capitalize())
        self._plural = args.get(
            'plural', pluralize(self._singular.lower()).capitalize())
        # horrible but for backward compatibility of appadmin:
        if 'primarykey' in args and args['primarykey'] is not None:
            self._primarykey = args.get('primarykey')

        # per-operation callback lists; uploads are cleaned automatically
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        self.add_method = MethodAdder(self)

        fieldnames, newfields=set(), []
        _primarykey = getattr(self, '_primarykey', None)
        if _primarykey is not None:
            if not isinstance(_primarykey, list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'"
                    % tablename)
            if len(_primarykey) == 1:
                # single-column primary key doubles as the table id
                self._id = [f for f in fields if isinstance(f, Field)
                            and f.name ==_primarykey[0]][0]
        elif not [f for f in fields if (isinstance(f, Field) and
                  f.type == 'id') or (isinstance(f, dict) and
                  f.get("type", None) == "id")]:
            # no explicit id field supplied: add the implicit one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []

        def include_new(field):
            # register a concrete field, tracking the id field specially
            newfields.append(field)
            fieldnames.add(field.name)
            if field.type == 'id':
                self._id = field
        # normalize the heterogeneous *fields input (Field/dict/Table)
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    # field already belongs to another table: work on a copy
                    field = copy.copy(field)
                include_new(field)
            elif isinstance(field, dict) and not field['fieldname'] in fieldnames:
                include_new(Field(**field))
            elif isinstance(field, Table):
                # inherit all non-id fields from another table
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type == 'id':
                        t2 = not table._actual and self._tablename
                        include_new(field.clone(point_self_references_to=t2))
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob is True:
            # adapters storing uploads in the db get a companion blob field
            uploadfields = [f.name for f in fields if f.type == 'blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True and not field.uploadfs:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn, str) and not fn in uploadfields and not field.uploadfs:
                    fields.append(Field(fn, 'blob', default='',
                                        writable=False, readable=False))

        fieldnames_set = set()
        reserved = dir(Table) + ['fields']
        if (db and db.check_reserved):
            check_reserved = db.check_reserved_keyword
        else:
            def check_reserved(field_name):
                if field_name in reserved:
                    raise SyntaxError("field name %s not allowed" % field_name)
        # attach each field to this table and detect duplicates
        for field in fields:
            field_name = field.name
            check_reserved(field_name)
            if db and db._ignore_field_case:
                fname_item = field_name.lower()
            else:
                fname_item = field_name
            if fname_item in fieldnames_set:
                raise SyntaxError("duplicate field %s in table %s" %
                                  (field_name, tablename))
            else:
                fieldnames_set.add(fname_item)

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
        self.ALL = SQLALL(self)

        if _primarykey is not None:
            for k in _primarykey:
                if k not in self.fields:
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " %
                        tablename)
                else:
                    self[k].notnull = True
        # virtual fields are attached last, once real fields exist
        for field in virtual_fields:
            self[field.name] = field

    @property
    def fields(self):
        """The table's field names as a SQLCallableList (alias of _fields)."""
        return self._fields
8792
8793 - def update(self, *args, **kwargs):
8794 raise RuntimeError("Syntax Not Supported")
8795
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name='%(tablename)s_archive',
                                  is_active='is_active',
                                  current_record='current_record',
                                  current_record_label=None):
        """Create an archive table and wire callbacks so updates archive the
        previous record and deletes become soft (is_active=False)."""
        db = self._db
        archive_db = archive_db or db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        same_db = archive_db is db
        # cross-db archives cannot hold real references: degrade to bigint
        field_type = self if same_db else 'bigint'
        clones = []
        for field in self:
            nfk = same_db or not field.type.startswith('reference')
            clones.append(
                field.clone(unique=False, type=field.type if nfk else 'bigint')
                )
        archive_db.define_table(
            archive_name,
            Field(current_record, field_type, label=current_record_label),
            *clones, **dict(format=self._format))

        # archive the old row before every update
        self._before_update.append(
            lambda qset, fs, db=archive_db, an=archive_name, cn=current_record:
                archive_record(qset, fs, db[an], cn))
        if is_active and is_active in fieldnames:
            # soft delete: flip is_active instead of removing the row
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
        # common filter hiding archived (inactive) rows from selects
        newquery = lambda query, t=self, name=self._tablename: \
            reduce(AND, [db[tn].is_active == True
                         for tn in db._adapter.tables(query)
                         if tn == name or getattr(db[tn],'_ot',None)==name])
        query = self._common_filter
        if query:
            # NOTE(review): combines an existing Query with a callable via
            # '&' — relies on Query.__and__ handling callables; confirm
            newquery = query & newquery
        self._common_filter = newquery
8835
8836 - def _validate(self, **vars):
8837 errors = Row() 8838 for key, value in vars.iteritems(): 8839 value, error = self[key].validate(value) 8840 if error: 8841 errors[key] = error 8842 return errors
8843
8844 - def _create_references(self):
8845 db = self._db 8846 pr = db._pending_references 8847 self._referenced_by = [] 8848 self._references = [] 8849 for field in self: 8850 #fieldname = field.name ##FIXME not used ? 8851 field_type = field.type 8852 if isinstance(field_type, str) and field_type[:10] == 'reference ': 8853 ref = field_type[10:].strip() 8854 if not ref: 8855 SyntaxError('Table: reference to nothing: %s' % ref) 8856 if '.' in ref: 8857 rtablename, throw_it, rfieldname = ref.partition('.') 8858 else: 8859 rtablename, rfieldname = ref, None 8860 if not rtablename in db: 8861 pr[rtablename] = pr.get(rtablename, []) + [field] 8862 continue 8863 rtable = db[rtablename] 8864 if rfieldname: 8865 if not hasattr(rtable, '_primarykey'): 8866 raise SyntaxError( 8867 'keyed tables can only reference other keyed tables (for now)') 8868 if rfieldname not in rtable.fields: 8869 raise SyntaxError( 8870 "invalid field '%s' for referenced table '%s'" 8871 " in table '%s'" % (rfieldname, rtablename, self._tablename) 8872 ) 8873 rfield = rtable[rfieldname] 8874 else: 8875 rfield = rtable._id 8876 rtable._referenced_by.append(field) 8877 field.referent = rfield 8878 self._references.append(field) 8879 else: 8880 field.referent = None 8881 if self._tablename in pr: 8882 referees = pr.pop(self._tablename) 8883 for referee in referees: 8884 self._referenced_by.append(referee)
8885
8886 - def _filter_fields(self, record, id=False):
8887 return dict([(k, v) for (k, v) in record.iteritems() if k 8888 in self.fields and (self[k].type!='id' or id)])
8889
8890 - def _build_query(self,key):
8891 """ for keyed table only """ 8892 query = None 8893 for k,v in key.iteritems(): 8894 if k in self._primarykey: 8895 if query: 8896 query = query & (self[k] == v) 8897 else: 8898 query = (self[k] == v) 8899 else: 8900 raise SyntaxError( 8901 'Field %s is not part of the primary key of %s' % 8902 (k,self._tablename) 8903 ) 8904 return query
8905
8906 - def __getitem__(self, key):
8907 if not key: 8908 return None 8909 elif isinstance(key, dict): 8910 """ for keyed table """ 8911 query = self._build_query(key) 8912 return self._db(query).select(limitby=(0, 1), orderby_on_limitby=False).first() 8913 elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key): 8914 return self._db(self._id == key).select(limitby=(0, 1), orderby_on_limitby=False).first() 8915 elif key: 8916 return ogetattr(self, str(key))
8917
    def __call__(self, key=DEFAULT, **kwargs):
        """Fetch a single record by id or Query (*key*), or by equality on
        **kwargs; returns None when nothing matches or extra kwargs
        disagree with the found record.

        Special kwargs: _for_update (row locking) and _orderby are popped
        and forwarded to the select.
        """
        for_update = kwargs.get('_for_update', False)
        if '_for_update' in kwargs:
            del kwargs['_for_update']

        orderby = kwargs.get('_orderby', None)
        if '_orderby' in kwargs:
            del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            elif not str(key).isdigit():
                # non-numeric key cannot be an id
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            if record:
                # remaining kwargs act as a post-fetch equality filter
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
                return record
        elif kwargs:
            # no key: look the record up by the kwargs equality query
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
        else:
            return None
8945
    def __setitem__(self, key, value):
        """table[pk_dict] = row_dict upserts on a keyed table;
        table[0] = row_dict inserts; table[id] = row_dict updates;
        anything else is attribute assignment."""
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; fall back to update on conflict
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError(
                    'key must have all fields from primary key: %s'%
                    (self._primarykey))
        elif str(key).isdigit():
            if key == 0:
                # id 0 is the conventional "insert new record" key
                self.insert(**self._filter_fields(value))
            elif self._db(self._id == key)\
                    .update(**self._filter_fields(value)) is None:
                raise SyntaxError('No such record: %s' % key)
        else:
            if isinstance(key, dict):
                raise SyntaxError(
                    'value must be a dictionary: %s' % value)
            osetattr(self, str(key), value)

    __getattr__ = __getitem__
8975 - def __setattr__(self, key, value):
8976 if key[:1]!='_' and key in self: 8977 raise SyntaxError('Object exists and cannot be redefined: %s' % key) 8978 osetattr(self,key,value)
8979
8980 - def __delitem__(self, key):
8981 if isinstance(key, dict): 8982 query = self._build_query(key) 8983 if not self._db(query).delete(): 8984 raise SyntaxError('No such record: %s' % key) 8985 elif not str(key).isdigit() or \ 8986 not self._db(self._id == key).delete(): 8987 raise SyntaxError('No such record: %s' % key)
8988
8989 - def __contains__(self,key):
8990 return hasattr(self, key)
8991 8992 has_key = __contains__ 8993
8994 - def items(self):
8995 return self.__dict__.items()
8996
8997 - def __iter__(self):
8998 for fieldname in self.fields: 8999 yield self[fieldname]
9000
9001 - def iteritems(self):
9002 return self.__dict__.iteritems()
9003
9004 - def __repr__(self):
9005 return '<Table %s (%s)>' % (self._tablename, ','.join(self.fields()))
9006
9007 - def __str__(self):
9008 if self._ot is not None: 9009 ot = self._ot 9010 if 'Oracle' in str(type(self._db._adapter)): 9011 return '%s %s' % (ot, self._tablename) 9012 return '%s AS %s' % (ot, self._tablename) 9013 9014 return self._tablename
9015 9016 @property
9017 - def sqlsafe(self):
9018 rname = self._rname 9019 if rname: return rname 9020 return self._db._adapter.sqlsafe_table(self._tablename)
9021 9022 @property
9023 - def sqlsafe_alias(self):
9024 rname = self._rname 9025 ot = self._ot 9026 if rname and not ot: return rname 9027 return self._db._adapter.sqlsafe_table(self._tablename, self._ot)
9028 9029
9030 - def _drop(self, mode=''):
9031 return self._db._adapter._drop(self, mode)
9032
9033 - def drop(self, mode=''):
9034 return self._db._adapter.drop(self,mode)
9035
    def _listify(self, fields, update=False):
        """Turn a {name: value} dict into a list of (Field, value) pairs,
        applying filter_in, defaults/update values and computed fields.

        Mutates *fields* in place by adding defaulted/updated values.
        :raises SyntaxError: unknown field name or uncomputable required field
        :raises RuntimeError: missing required field on insert
        """
        new_fields = {} # format: new_fields[name] = (field,value)

        # store all fields passed as input in new_fields
        for name in fields:
            if not name in self.fields:
                if name != 'id':
                    raise SyntaxError(
                        'Field %s does not belong to the table' % name)
            else:
                field = self[name]
                value = fields[name]
                if field.filter_in:
                    value = field.filter_in(value)
                new_fields[name] = (field, value)

        # check all fields that should be in the table but are not passed
        to_compute = []
        for ofield in self:
            name = ofield.name
            if not name in new_fields:
                # if field is supposed to be computed, compute it!
                if ofield.compute: # save those to compute for later
                    to_compute.append((name, ofield))
                # if field is required, check its default value
                elif not update and not ofield.default is None:
                    value = ofield.default
                    fields[name] = value
                    new_fields[name] = (ofield, value)
                # if this is an update, use the update value instead
                elif update and not ofield.update is None:
                    value = ofield.update
                    fields[name] = value
                    new_fields[name] = (ofield, value)
                # if the field is still not there but it should, error
                elif not update and ofield.required:
                    raise RuntimeError(
                        'Table: missing required field: %s' % name)
        # now deal with fields that are supposed to be computed
        if to_compute:
            row = Row(fields)
            for name, ofield in to_compute:
                # try compute it
                try:
                    row[name] = new_value = ofield.compute(row)
                    new_fields[name] = (ofield, new_value)
                except (KeyError, AttributeError):
                    # error silently unless field is required!
                    if ofield.required:
                        raise SyntaxError('unable to compute field: %s' % name)
        return new_fields.values()
9087
9088 - def _attempt_upload(self, fields):
9089 for field in self: 9090 if field.type == 'upload' and field.name in fields: 9091 value = fields[field.name] 9092 if value is not None and not isinstance(value, str): 9093 if hasattr(value, 'file') and hasattr(value, 'filename'): 9094 new_name = field.store(value.file, filename=value.filename) 9095 elif hasattr(value, 'read') and hasattr(value, 'name'): 9096 new_name = field.store(value, filename=value.name) 9097 else: 9098 raise RuntimeError("Unable to handle upload") 9099 fields[field.name] = new_name
9100
9101 - def _defaults(self, fields):
9102 "If there are no fields/values specified, return table defaults" 9103 if not fields: 9104 fields = {} 9105 for field in self: 9106 if field.type != "id": 9107 fields[field.name] = field.default 9108 return fields
9109
9110 - def _insert(self, **fields):
9111 fields = self._defaults(fields) 9112 return self._db._adapter._insert(self, self._listify(fields))
9113
9114 - def insert(self, **fields):
9115 fields = self._defaults(fields) 9116 self._attempt_upload(fields) 9117 if any(f(fields) for f in self._before_insert): return 0 9118 ret = self._db._adapter.insert(self, self._listify(fields)) 9119 if ret and self._after_insert: 9120 fields = Row(fields) 9121 [f(fields,ret) for f in self._after_insert] 9122 return ret
9123
9124 - def validate_and_insert(self, **fields):
9125 response = Row() 9126 response.errors = Row() 9127 new_fields = copy.copy(fields) 9128 for key,value in fields.iteritems(): 9129 value,error = self[key].validate(value) 9130 if error: 9131 response.errors[key] = "%s" % error 9132 else: 9133 new_fields[key] = value 9134 if not response.errors: 9135 response.id = self.insert(**new_fields) 9136 else: 9137 response.id = None 9138 return response
9139
9140 - def validate_and_update(self, _key=DEFAULT, **fields):
9141 response = Row() 9142 response.errors = Row() 9143 new_fields = copy.copy(fields) 9144 9145 for key, value in fields.iteritems(): 9146 value, error = self[key].validate(value) 9147 if error: 9148 response.errors[key] = "%s" % error 9149 else: 9150 new_fields[key] = value 9151 9152 if _key is DEFAULT: 9153 record = self(**fields) 9154 elif isinstance(_key, dict): 9155 record = self(**_key) 9156 else: 9157 record = self(_key) 9158 9159 if not response.errors and record: 9160 if '_id' in self: 9161 myset = self._db(self._id == record[self._id.name]) 9162 else: 9163 query = None 9164 for key, value in _key.iteritems(): 9165 if query is None: 9166 query = getattr(self, key) == value 9167 else: 9168 query = query & (getattr(self, key) == value) 9169 myset = self._db(query) 9170 response.id = myset.update(**fields) 9171 else: 9172 response.id = None 9173 return response
9174
9175 - def update_or_insert(self, _key=DEFAULT, **values):
9176 if _key is DEFAULT: 9177 record = self(**values) 9178 elif isinstance(_key, dict): 9179 record = self(**_key) 9180 else: 9181 record = self(_key) 9182 if record: 9183 record.update_record(**values) 9184 newid = None 9185 else: 9186 newid = self.insert(**values) 9187 return newid
9188
9189 - def validate_and_update_or_insert(self, _key=DEFAULT, **fields):
9190 if _key is DEFAULT or _key == '': 9191 primary_keys = {} 9192 for key, value in fields.iteritems(): 9193 if key in self._primarykey: 9194 primary_keys[key] = value 9195 if primary_keys != {}: 9196 record = self(**primary_keys) 9197 _key = primary_keys 9198 else: 9199 required_keys = {} 9200 for key, value in fields.iteritems(): 9201 if getattr(self, key).required: 9202 required_keys[key] = value 9203 record = self(**required_keys) 9204 _key = required_keys 9205 elif isinstance(_key, dict): 9206 record = self(**_key) 9207 else: 9208 record = self(_key) 9209 9210 if record: 9211 response = self.validate_and_update(_key, **fields) 9212 primary_keys = {} 9213 for key in self._primarykey: 9214 primary_keys[key] = getattr(record, key) 9215 response.id = primary_keys 9216 else: 9217 response = self.validate_and_insert(**fields) 9218 return response
9219
9220 - def bulk_insert(self, items):
9221 """ 9222 here items is a list of dictionaries 9223 """ 9224 items = [self._listify(item) for item in items] 9225 if any(f(item) for item in items for f in self._before_insert):return 0 9226 ret = self._db._adapter.bulk_insert(self,items) 9227 ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert] 9228 return ret
9229
9230 - def _truncate(self, mode=None):
9231 return self._db._adapter._truncate(self, mode)
9232
9233 - def truncate(self, mode=None):
9234 return self._db._adapter.truncate(self, mode)
9235
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        id_offset=None, # id_offset used only when id_map is None
        *args, **kwargs
        ):
        """
        Import records from csv file.
        Column headers must have same names as table fields.
        Field 'id' is ignored.
        If column names read 'table.file' the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        'restore' argument is default False;
        if set True will remove old values in table first.
        'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table.
        This assumes that there is an field of type id that
        is integer and in incrementing order.
        Will keep the id numbers in restored table.

        Extra keyword arguments: 'delimiter', 'quotechar' and 'quoting'
        are forwarded to csv.reader; '*args' is accepted but unused.
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        restore = kwargs.get('restore', False)
        if restore:
            # wipe the table before re-importing (restore mode)
            self._db[self].truncate()

        reader = csv.reader(csvfile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            # per-table sub-map: old csv id -> new db id
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map, id_offset):
            # convert one csv cell (a string) to the python value expected
            # by `field`; returns (field_name, converted_value).
            # May raise ValueError on unparsable numeric input.
            list_reference_s='list:reference'
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double' or field.type=='float':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type in ('integer','bigint'):
                if not value.strip():
                    value = None
                else:
                    value = long(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith(list_reference_s):
                ref_table = field.type[len(list_reference_s):].strip()
                if id_map is not None:
                    # remap each referenced id through the id_map
                    value = [id_map[ref_table][long(v)] \
                             for v in bar_decode_string(value)]
                else:
                    value = [v for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                # field.type[9:] strips the 'reference' prefix
                try:
                    value = id_map[field.type[9:].strip()][long(value)]
                except KeyError:
                    pass
            elif id_offset and field.type.startswith('reference'):
                try:
                    value = id_offset[field.type[9:].strip()]+long(value)
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            # True when the column maps to this table's 'id' field
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        first = True
        unique_idx = None
        for lineno, line in enumerate(reader):
            if not line:
                break
            if not colnames:
                # assume this is the first line of the input, contains colnames
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], None
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    elif colname in self.fields:
                        cols.append((i,self[colname]))
                    if colname == unique:
                        unique_idx = i
            else:
                # every other line contains instead data
                items = []
                for i, field in cols:
                    try:
                        items.append(fix(field, line[i], id_map, id_offset))
                    except ValueError:
                        raise RuntimeError("Unable to parse line:%s field:%s value:'%s'"
                                           % (lineno+1,field,line[i]))

                if not (id_map or cid is None or id_offset is None or unique_idx):
                    # id-preserving restore path: insert repeatedly until the
                    # generated id catches up with csv_id (+ offset), deleting
                    # the intermediate rows
                    csv_id = long(line[cid])
                    curr_id = self.insert(**dict(items))
                    if first:
                        first = False
                        # First curr_id is bigger than csv_id,
                        # then we are not restoring but
                        # extending db table with csv db table
                        id_offset[self._tablename] = (curr_id-csv_id) \
                            if curr_id>csv_id else 0
                    # create new id until we get the same as old_id+offset
                    while curr_id<csv_id+id_offset[self._tablename]:
                        self._db(self._db[self][colnames[cid]] == curr_id).delete()
                        curr_id = self.insert(**dict(items))
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                elif not unique_idx:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                # remember old->new id so later tables can remap references
                if id_map and cid is not None:
                    id_map_self[long(line[cid])] = new_id
9377 - def as_dict(self, flat=False, sanitize=True):
9378 table_as_dict = dict( 9379 tablename=str(self), 9380 fields=[], 9381 sequence_name=self._sequence_name, 9382 trigger_name=self._trigger_name, 9383 common_filter=self._common_filter, 9384 format=self._format, 9385 singular=self._singular, 9386 plural=self._plural) 9387 9388 for field in self: 9389 if (field.readable or field.writable) or (not sanitize): 9390 table_as_dict["fields"].append(field.as_dict( 9391 flat=flat, sanitize=sanitize)) 9392 return table_as_dict
9393
9394 - def as_xml(self, sanitize=True):
9395 if not have_serializers: 9396 raise ImportError("No xml serializers available") 9397 d = self.as_dict(flat=True, sanitize=sanitize) 9398 return serializers.xml(d)
9399
9400 - def as_json(self, sanitize=True):
9401 if not have_serializers: 9402 raise ImportError("No json serializers available") 9403 d = self.as_dict(flat=True, sanitize=sanitize) 9404 return serializers.json(d)
9405
9406 - def as_yaml(self, sanitize=True):
9407 if not have_serializers: 9408 raise ImportError("No YAML serializers available") 9409 d = self.as_dict(flat=True, sanitize=sanitize) 9410 return serializers.yaml(d)
9411
9412 - def with_alias(self, alias):
9413 return self._db._adapter.alias(self, alias)
9414
9415 - def on(self, query):
9416 return Expression(self._db, self._db._adapter.ON, self, query)
9417
def archive_record(qset, fs, archive_table, current_record):
    """Copy every row selected by ``qset`` into ``archive_table``,
    storing the original row id in the ``current_record`` column.

    Refuses multi-table (join) sets.  Always returns False so it can be
    used as a non-vetoing before-update/delete callback.
    """
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames) != 1:
        raise RuntimeError("cannot update join")
    for row in qset.select():
        archived = archive_table._filter_fields(row)
        archived[current_record] = row.id
        archive_table.insert(**archived)
    return False
9428
class Expression(object):
    """
    Abstract SQL expression node.

    Wraps an adapter operator (``op``) with up to two operands (``first``,
    ``second``).  Arithmetic, aggregates, string helpers and GIS helpers
    return new Expressions; comparison operators return Query objects that
    can be passed to ``db(...)``.
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):
        """
        db: DAL instance whose adapter renders the expression
        op: adapter operator (e.g. ``db._adapter.ADD``)
        first, second: operands (expressions, fields or literals)
        type: web2py type of the result; defaults to ``first.type``
        """
        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first, '_table', None)
        if not type and first and hasattr(first, 'type'):
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # aggregates and scalar SQL functions

    def sum(self):
        """SQL SUM() of this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        """SQL MAX() of this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        """SQL MIN() of this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        """SQL LENGTH() of this expression (integer result)."""
        db = self.db
        return Expression(db, db._adapter.LENGTH, self, None, 'integer')

    def avg(self):
        """SQL AVG() of this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        """SQL ABS() of this expression."""
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    def lower(self):
        """SQL LOWER() of this expression."""
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        """SQL UPPER() of this expression."""
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    def replace(self, a, b):
        """SQL REPLACE(): substitute substring ``a`` with ``b``."""
        db = self.db
        return Expression(db, db._adapter.REPLACE, self, (a, b), self.type)

    # date/time component extraction (integer results)

    def year(self):
        """EXTRACT(year) from a date/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        """EXTRACT(month) from a date/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        """EXTRACT(day) from a date/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        """EXTRACT(hour) from a time/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        """EXTRACT(minute) from a time/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self, *others):
        """SQL COALESCE(self, *others)."""
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        """SQL COALESCE(self, 0)."""
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        """EXTRACT(second) from a time/datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        """Seconds since the epoch of a datetime expression."""
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    def __getslice__(self, start, stop):
        """SQL SUBSTRING via Python 2 slice syntax; negative indexes count
        from the end of the string."""
        db = self.db
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db, db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        """Single-character SUBSTRING: expr[i] is expr[i:i+1]."""
        return self[i:i + 1]

    def __str__(self):
        """Render the expression as SQL through the adapter."""
        return self.db._adapter.expand(self, self.type)

    def __or__(self, other):  # for use in sortby
        """Comma-join two expressions (used by orderby/groupby)."""
        db = self.db
        return Expression(db, db._adapter.COMMA, self, other, self.type)

    def __invert__(self):
        """Descending-order marker (used by orderby); ~~x collapses to x.

        BUG FIX: the original guarded the double-negation unwrap with
        ``hasattr(self, '_op')`` but the attribute is named ``op``, so the
        unwrap never fired and ~~x produced a doubly-wrapped expression.
        """
        db = self.db
        if self.op == db._adapter.INVERT:
            return self.first
        return Expression(db, db._adapter.INVERT, self, type=self.type)

    def __add__(self, other):
        """SQL addition (or concatenation, per the adapter)."""
        db = self.db
        return Expression(db, db._adapter.ADD, self, other, self.type)

    def __sub__(self, other):
        """SQL subtraction; result type depends on operand type."""
        db = self.db
        if self.type in ('integer', 'bigint'):
            result_type = 'integer'
        elif self.type in ['date', 'time', 'datetime', 'double', 'float']:
            result_type = 'double'
        elif self.type.startswith('decimal('):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db, db._adapter.SUB, self, other, result_type)

    def __mul__(self, other):
        """SQL multiplication."""
        db = self.db
        return Expression(db, db._adapter.MUL, self, other, self.type)

    def __div__(self, other):
        """SQL division (Python 2 ``/`` operator)."""
        db = self.db
        return Expression(db, db._adapter.DIV, self, other, self.type)

    def __mod__(self, other):
        """SQL modulo."""
        db = self.db
        return Expression(db, db._adapter.MOD, self, other, self.type)

    # comparisons build Query objects, not Expressions

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        """SQL LIKE (case-sensitive) or ILIKE (default)."""
        db = self.db
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        """Backend regular-expression match."""
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value, **kwattr):
        """
        Accepts the following inputs:
           field.belongs(1, 2)
           field.belongs((1, 2))
           field.belongs(query)

        Does NOT accept:
           field.belongs(1)

        With ``null=True``, a None in the value set is translated into an
        additional IS NULL test.
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
            if isinstance(value, Query):
                # nested select: field belongs to the result of a sub-query
                value = db(value)._select(value.first._table._id)
            elif not isinstance(value, basestring):
                value = set(value)
                if kwattr.get('null') and None in value:
                    value.remove(None)
                    return (self == None) | Query(db, db._adapter.BELONGS, self, value)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        """SQL LIKE 'value%'; only valid on text-like fields."""
        db = self.db
        if not self.type in ('string', 'text', 'json', 'upload'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        """SQL LIKE '%value'; only valid on text-like fields."""
        db = self.db
        if not self.type in ('string', 'text', 'json', 'upload'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        Substring (or list-membership) containment test.

        The case_sensitive parameter is only useful for PostgreSQL.
        For other RDBMSs it is ignored and contains is always case
        in-sensitive.  For MongoDB and GAE contains is always case
        sensitive.

        A list/tuple value builds one sub-query per non-empty item,
        combined with AND when ``all`` is true, otherwise OR.
        """
        db = self.db
        if isinstance(value, (list, tuple)):
            subqueries = [self.contains(str(v).strip(),
                                        case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR, subqueries)
        if not self.type in ('string', 'text', 'json', 'upload') and \
                not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value,
                     case_sensitive=case_sensitive)

    def with_alias(self, alias):
        """SQL 'AS alias' for this expression in a select list."""
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # GIS expressions

    def st_asgeojson(self, precision=15, options=0, version=1):
        """Render a geometry as GeoJSON text."""
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        """Render a geometry as WKT text."""
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        """X coordinate of a point geometry."""
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        """Y coordinate of a point geometry."""
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        """Distance between two geometries (double result)."""
        db = self.db
        return Expression(db, db._adapter.ST_DISTANCE, self, other, 'double')

    def st_simplify(self, value):
        """Simplified geometry with the given tolerance."""
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # GIS queries

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)

    def st_dwithin(self, value, distance):
        db = self.db
        return Query(db, db._adapter.ST_DWITHIN, self, (value, distance))
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(self, type='string', native=None, encoder=None,
                 decoder=None, validator=None, _class=None):
        self.type = type
        self.native = native
        # identity transforms unless the caller supplies codecs
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        # delegate to the wrapped web2py type; non-string types raise
        # TypeError, which we report as "no"
        try:
            return self.type.startswith(self, text)
        except TypeError:
            return False

    def endswith(self, text=None):
        try:
            return self.type.endswith(self, text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        # custom types do not support slicing
        return None

    def __getitem__(self, i):
        # custom types do not support indexing
        return None

    def __str__(self):
        return self._class
class FieldVirtual(object):
    """A computed (virtual) field: ``f`` is called to produce the value;
    never stored in the database (readable, not writable)."""

    def __init__(self, name, f=None, ftype='string', label=None,
                 table_name=None):
        # for backward compatibility: FieldVirtual(func) with no name
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_', ' ')
        self.represent = lambda v, r=None: v
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None

    def __str__(self):
        return '%s.%s' % (self.tablename, self.name)
9816
class FieldMethod(object):
    """A callable (method-like) field attached to rows; ``f`` is the
    function and ``handler`` the optional lazy-evaluation wrapper."""

    def __init__(self, name, f=None, handler=None):
        # for backward compatibility: FieldMethod(func) with no name
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.handler = handler
9823
def list_represent(x, r=None):
    """Default representation of list: fields: comma-separated items;
    None/empty renders as ''."""
    items = x or []
    return ', '.join(str(item) for item in items)
9827
class Field(Expression):

    Virtual = FieldVirtual
    Method = FieldMethod
    Lazy = FieldMethod # for backward compatibility

    # NOTE(review): this string is not the class __doc__ (it follows the
    # class attributes above); kept as in-source reference documentation.
    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            widget=None, label=None, comment=None,
            uploadfield=True, # True means store on disk,
                              # 'a_field_name' means store in this field in db
                              # False means file content will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False # upload to separate directories by uuid_keys
                                 # first 2 character and tablename.fieldname
                                 # False - old behavior
                                 # True - put uploaded file in
                                 #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                 #        directory)
            uploadfs=None     # a pyfilesystem where to store upload

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password

    """
    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in=None,
        filter_out=None,
        custom_qualifier=None,
        map_none=None,
        rname=None
        ):
        """Define a database field; see the class-level notes above for the
        meaning of each parameter.  Raises SyntaxError on invalid names."""
        # a Field is an Expression with no operator until attached to a table
        self._db = self.db = None # both for backward compatibility
        self.op = None
        self.first = None
        self.second = None
        if isinstance(fieldname, unicode):
            try:
                fieldname = str(fieldname)
            except UnicodeEncodeError:
                raise SyntaxError('Field: invalid unicode field name')
        self.name = fieldname = cleanup(fieldname)
        # reject names that would collide with Table attributes, look
        # private, contain dots, or shadow python keywords
        if not isinstance(fieldname, str) or hasattr(Table, fieldname) or \
                fieldname[0] == '_' or '.' in fieldname or \
                REGEX_PYTHON_KEYWORDS.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s, '
                              'use rname for "funny" names' % fieldname)

        # passing a Table/Field as type makes this a reference field
        if not isinstance(type, (Table, Field)):
            self.type = type
        else:
            self.type = 'reference %s' % type

        self.length = length if not length is None else DEFAULTLENGTH.get(self.type, 512)
        # with no explicit default, reuse the update value (or None)
        self.default = default if default != DEFAULT else (update or None)
        self.required = required # is this field required
        self.ondelete = ondelete.upper() # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list: fields get a comma-separated default representation
        self.represent = (list_represent if represent is None and
                          type in ('list:integer', 'list:string') else represent)
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        self.label = (label if label is not None else
                      fieldname.replace('_', ' ').title())
        self.requires = requires if requires is not None else []
        self.map_none = map_none
        # rname: raw backend column name for names that are not valid here
        self._rname = rname
9954
9955 - def set_attributes(self, *args, **attributes):
9956 self.__dict__.update(*args, **attributes)
9957
9958 - def clone(self, point_self_references_to=False, **args):
9959 field = copy.copy(self) 9960 if point_self_references_to and \ 9961 field.type == 'reference %s'+field._tablename: 9962 field.type = 'reference %s' % point_self_references_to 9963 field.__dict__.update(args) 9964 return field
9965
    def store(self, file, filename=None, path=None):
        """Store an uploaded file and return the encoded stored filename.

        ``file`` may be a cgi.FieldStorage or any file-like object.  The
        stored name encodes table, field, a uuid key and the b16-encoded
        original filename.  Depending on ``uploadfield`` the content goes
        into a blob field, a pyfilesystem, or the uploads folder.
        """
        if self.custom_store:
            return self.custom_store(file, filename, path)
        if isinstance(file, cgi.FieldStorage):
            filename = filename or file.filename
            file = file.file
        elif not filename:
            filename = file.name
        # normalize separators and strip any directory component
        filename = os.path.basename(filename.replace('/', os.sep).replace('\\', os.sep))
        m = REGEX_STORE_PATTERN.search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        # truncate so name + '.' + extension fits the field length
        newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
        self_uploadfield = self.uploadfield
        if isinstance(self_uploadfield, Field):
            # content goes into a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            keys = {self_uploadfield.name: newfilename,
                    blob_uploadfield_name: file.read()}
            self_uploadfield.table.insert(**keys)
        elif self_uploadfield is True:
            # content goes on disk (or a pyfilesystem)
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError(
                    "you must specify a Field(...,uploadfolder=...)")
            if self.uploadseparate:
                if self.uploadfs:
                    raise RuntimeError("not supported")
                # shard files into <table>.<field>/<uuid_key[:2]> subfolders
                path = pjoin(path, "%s.%s" % (
                    self._tablename, self.name), uuid_key[:2]
                )
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            if self.uploadfs:
                dest_file = self.uploadfs.open(newfilename, 'wb')
            else:
                dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError(
                    'Unable to store file "%s" because invalid permissions, '
                    'readonly file system, or filename too long' % pathfilename)
            dest_file.close()
        return newfilename
10019
    def retrieve(self, name, path=None, nameonly=False):
        """
        Retrieve a previously stored upload as (filename, stream).

        if nameonly==True return (filename, fullfilename) instead of
        (filename, stream)

        Raises http.HTTP(404/403) when authorization is configured and the
        record is missing or access is denied.
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        if self.authorize or isinstance(self_uploadfield, str):
            # need the owning row either to authorize or to read the blob
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
            if self.authorize and not self.authorize(row):
                raise http.HTTP(403)
        file_properties = self.retrieve_file_properties(name, path)
        filename = file_properties['filename']
        if isinstance(self_uploadfield, str): # ## if file is in DB
            stream = StringIO.StringIO(row[self_uploadfield] or '')
        elif isinstance(self_uploadfield, Field):
            # content lives in a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = StringIO.StringIO(data)
        elif self.uploadfs:
            # ## if file is on pyfilesystem
            stream = self.uploadfs.open(name, 'rb')
        else:
            # ## if file is on regular filesystem
            # this is intentionally a string with filename and not a stream
            # this propagates and allows stream_file_or_304_or_206 to be called
            fullname = pjoin(file_properties['path'], name)
            if nameonly:
                return (filename, fullname)
            stream = open(fullname, 'rb')
        return (filename, stream)
10056
    def retrieve_file_properties(self, name, path=None):
        """Decode a stored upload name into dict(path=..., filename=...).

        The original filename is recovered from the b16-encoded component
        of ``name``; ``path`` is None when the content lives in the DB.
        Raises TypeError when ``name`` does not match the upload pattern.
        """
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s file properties' % name)
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        if m.group('name'):
            try:
                # b16-decode the original filename, sanitize odd characters
                filename = base64.b16decode(m.group('name'), True)
                filename = REGEX_CLEANUP_FN.sub('_', filename)
            except (TypeError, AttributeError):
                filename = name
        else:
            filename = name
        # ## if file is in DB
        if isinstance(self_uploadfield, (str, Field)):
            return dict(path=None, filename=filename)
        # ## if file is on filesystem
        if not path:
            if self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
        if self.uploadseparate:
            # mirror the sharded layout used by store()
            t = m.group('table')
            f = m.group('field')
            u = m.group('uuidkey')
            path = pjoin(path, "%s.%s" % (t, f), u[:2])
        return dict(path=path, filename=filename)
10087
10088 - def formatter(self, value):
10089 requires = self.requires 10090 if value is None or not requires: 10091 return value or self.map_none 10092 if not isinstance(requires, (list, tuple)): 10093 requires = [requires] 10094 elif isinstance(requires, tuple): 10095 requires = list(requires) 10096 else: 10097 requires = copy.copy(requires) 10098 requires.reverse() 10099 for item in requires: 10100 if hasattr(item, 'formatter'): 10101 value = item.formatter(value) 10102 return value
10103
10104 - def validate(self, value):
10105 if not self.requires or self.requires == DEFAULT: 10106 return ((value if value != self.map_none else None), None) 10107 requires = self.requires 10108 if not isinstance(requires, (list, tuple)): 10109 requires = [requires] 10110 for validator in requires: 10111 (value, error) = validator(value) 10112 if error: 10113 return (value, error) 10114 return ((value if value != self.map_none else None), None)
10115
10116 - def count(self, distinct=None):
10117 return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
10118
10119 - def as_dict(self, flat=False, sanitize=True):
10120 attrs = ( 10121 'name', 'authorize', 'represent', 'ondelete', 10122 'custom_store', 'autodelete', 'custom_retrieve', 10123 'filter_out', 'uploadseparate', 'widget', 'uploadfs', 10124 'update', 'custom_delete', 'uploadfield', 'uploadfolder', 10125 'custom_qualifier', 'unique', 'writable', 'compute', 10126 'map_none', 'default', 'type', 'required', 'readable', 10127 'requires', 'comment', 'label', 'length', 'notnull', 10128 'custom_retrieve_file_properties', 'filter_in') 10129 serializable = (int, long, basestring, float, tuple, 10130 bool, type(None)) 10131 10132 def flatten(obj): 10133 if isinstance(obj, dict): 10134 return dict((flatten(k), flatten(v)) for k, v in obj.items()) 10135 elif isinstance(obj, (tuple, list, set)): 10136 return [flatten(v) for v in obj] 10137 elif isinstance(obj, serializable): 10138 return obj 10139 elif isinstance(obj, (datetime.datetime, 10140 datetime.date, datetime.time)): 10141 return str(obj) 10142 else: 10143 return None
10144 10145 d = dict() 10146 if not (sanitize and not (self.readable or self.writable)): 10147 for attr in attrs: 10148 if flat: 10149 d.update({attr: flatten(getattr(self, attr))}) 10150 else: 10151 d.update({attr: getattr(self, attr)}) 10152 d["fieldname"] = d.pop("name") 10153 return d
10154
10155 - def as_xml(self, sanitize=True):
10156 if have_serializers: 10157 xml = serializers.xml 10158 else: 10159 raise ImportError("No xml serializers available") 10160 d = self.as_dict(flat=True, sanitize=sanitize) 10161 return xml(d)
10162
10163 - def as_json(self, sanitize=True):
10164 if have_serializers: 10165 json = serializers.json 10166 else: 10167 raise ImportError("No json serializers available") 10168 d = self.as_dict(flat=True, sanitize=sanitize) 10169 return json(d)
10170
10171 - def as_yaml(self, sanitize=True):
10172 if have_serializers: 10173 d = self.as_dict(flat=True, sanitize=sanitize) 10174 return serializers.yaml(d) 10175 else: 10176 raise ImportError("No YAML serializers available")
10177
10178 - def __nonzero__(self):
10179 return True
10180
10181 - def __str__(self):
10182 try: 10183 return '%s.%s' % (self.tablename, self.name) 10184 except: 10185 return '<no table>.%s' % self.name
10186 10187 @property
10188 - def sqlsafe(self):
10189 if self._table: 10190 return self._table.sqlsafe + '.' + \ 10191 (self._rname or self._db._adapter.sqlsafe_field(self.name)) 10192 return '<no table>.%s' % self.name
10193 10194 @property
10195 - def sqlsafe_name(self):
10196 return self._rname or self._db._adapter.sqlsafe_field(self.name)
10197
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters=False,
        **optional_args
    ):
        # op is an adapter method (e.g. adapter.EQ); first/second are the
        # operands (Field, Expression, nested Query or plain value)
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        # NOTE: deliberately calls BaseAdapter.expand (not the adapter's
        # possibly-overridden expand, as __str__ does below)
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter, self)

    def __str__(self):
        # SQL rendering of the query via the active adapter
        return str(self.db._adapter.expand(self))

    def __and__(self, other):
        # (q1 & q2) -> AND query
        return Query(self.db, self.db._adapter.AND, self, other)

    __rand__ = __and__

    def __or__(self, other):
        # (q1 | q2) -> OR query
        return Query(self.db, self.db._adapter.OR, self, other)

    __ror__ = __or__

    def __invert__(self):
        # double negation unwraps instead of nesting NOT(NOT(...))
        if self.op == self.db._adapter.NOT:
            return self.first
        return Query(self.db, self.db._adapter.NOT, self)

    def __eq__(self, other):
        # equality by rendered representation (structural comparison)
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self, t=1, f=0):
        # SQL CASE WHEN <self> THEN t ELSE f END
        return self.db._adapter.CASE(self, t, f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
         "second":0}
        """

        # Python 2 names: long, basestring
        SERIALIZABLE_TYPES = (tuple, dict, set, list, int, long, float,
                              basestring, type(None), bool)

        def loop(d):
            # recursively convert the query tree into nested dicts
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        newd[k] = unicode(v)
                elif k == "op":
                    # operators are exported by name (e.g. "EQ")
                    if callable(v):
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass  # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # XML serialization of the flat dict form
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # JSON serialization of the flat dict form
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
def xorify(orderby):
    """Fold a sequence into one value by OR-ing ('|') left to right.

    Returns None for an empty or falsy input.
    """
    if not orderby:
        return None
    items = iter(orderby)
    combined = next(items)
    for item in items:
        combined = combined | item
    return combined
def use_common_filters(query):
    """True when `query` exists, exposes ignore_common_filters, and does
    not ask for common filters to be skipped.

    Note: mirrors the original short-circuit, so a falsy `query` is
    returned as-is (e.g. None -> None).
    """
    if not query:
        return query
    if not hasattr(query, 'ignore_common_filters'):
        return False
    return not query.ignore_common_filters
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters=None):
        self.db = db
        self._db = db  # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        # only clone the query when the caller explicitly flips the
        # common-filters flag relative to the query's current setting
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter, self.query)

    def __call__(self, query, ignore_common_filters=False):
        # Refine this Set with an extra condition. Accepts a Query, a
        # Table (expands to its id-query), a raw SQL string, or a Field
        # (expands to field!=None).
        if query is None:
            return self
        elif isinstance(query, Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query, str):
            query = Expression(self.db, query)
        elif isinstance(query, Field):
            query = query != None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self, distinct=None):
        # SQL string for COUNT, without executing it
        return self.db._adapter._count(self.query, distinct)

    def _select(self, *fields, **attributes):
        # SQL string for SELECT, without executing it
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join', None),
                                    attributes.get('left', None),
                                    attributes.get('orderby', None),
                                    attributes.get('groupby', None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query, fields, attributes)

    def _delete(self):
        # SQL string for DELETE, without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename, self.query)

    def _update(self, **update_fields):
        # SQL string for UPDATE, without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields, update=True)
        return db._adapter._update(tablename, self.query, fields)

    def as_dict(self, flat=False, sanitize=True):
        # Dict representation; when sanitize is False the db uri/uid are
        # included (do not expose to untrusted clients)
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        # d is {"op": ..., "first": ..., "second": ...}; recurse on
        # nested dicts and rebuild the Query/Expression tree
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    # {"tablename": ..., "fieldname": ...} -> Field
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                # nullary adapter operators
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                # unary adapter operators
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                # binary adapter operators
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        # cheap emptiness test: select at most one record
        return not self.select(limitby=(0, 1), orderby_on_limitby=False)

    def count(self, distinct=None, cache=None):
        db = self.db
        if cache:
            # cache key is the generated SQL (md5'd when too long)
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            if len(key) > 200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self, distinct=distinct: \
                     db._adapter.count(self.query, distinct)),
                time_expire)
        return db._adapter.count(self.query, distinct)

    def select(self, *fields, **attributes):
        # execute a SELECT and return a Rows object
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join', None),
                                    attributes.get('left', None),
                                    attributes.get('orderby', None),
                                    attributes.get('groupby', None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query, fields, attributes)

    def nested_select(self, *fields, **attributes):
        # wrap the SELECT SQL as an Expression for use as a subquery
        return Expression(self.db, self._select(*fields, **attributes))

    def delete(self):
        # run _before_delete callbacks; any truthy return aborts (0 rows)
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename, self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        # run _before_update callbacks; any truthy return aborts (0 rows)
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        if any(f(self, update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields, update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update("%s" % table._tablename, self.query, fields)
        ret and [f(self, update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields, update=True)
        if not fields: raise SyntaxError("No fields to update")

        ret = self.db._adapter.update("%s" % table, self.query, fields)
        return ret

    def validate_and_update(self, **update_fields):
        # validate each field; on any error no update is performed and
        # response.updated is None, otherwise it is the affected-row count
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key, value in update_fields.iteritems():
            value, error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self, new_fields) for f in table._before_update):
                fields = table._listify(new_fields, update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename, self.query, fields)
                ret and [f(self, new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        # only autodelete upload fields stored on the filesystem
        fields = [f for f in fields if table[f].type == 'upload'
                  and table[f].uploadfield == True
                  and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                # skip files that the pending update is keeping
                if upload_fields and oldname == upload_fields[fieldname]:
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # files spread over subfolders by id prefix
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
10631
class RecordUpdater(object):
    """Callable bound to a specific record: implements row.update_record().

    Calling it updates the record (bypassing common filters), refreshes the
    cached column set, and returns it.
    """

    def __init__(self, colset, table, id):
        self.colset, self.db, self.tablename, self.id = \
            colset, table._db, table._tablename, id

    def __call__(self, **fields):
        """Update the bound record with `fields` (or re-save the cached
        column values when no fields are given) and return the colset."""
        colset, db, tablename, id = self.colset, self.db, self.tablename, self.id
        table = db[tablename]
        newfields = fields or dict(colset)
        # iterate over a snapshot: keys are deleted while looping.
        # (The original iterated newfields.keys() directly, which only
        # works because Python 2 keys() returns a list.)
        for fieldname in list(newfields.keys()):
            # drop unknown fields and the id field itself
            if fieldname not in table.fields or table[fieldname].type == 'id':
                del newfields[fieldname]
        # ignore_common_filters: the record must be reachable even when
        # common filters would exclude it
        table._db(table._id == id, ignore_common_filters=True).update(**newfields)
        colset.update(newfields)
        return colset
10647
class RecordDeleter(object):
    """Callable bound to a specific record: implements row.delete_record()."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        """Delete the bound record; returns the number of deleted rows."""
        db = self.db
        return db(db[self.tablename]._id == self.id).delete()
10653
class LazyReferenceGetter(object):
    """Resolves row('other_table') into a LazySet of referencing records."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, other_tablename):
        """Return a LazySet over rows of `other_tablename` referencing
        the bound record; AttributeError when lazy tables are disabled
        or no reference field exists."""
        if self.db._lazy_tables is False:
            raise AttributeError()
        table = self.db[self.tablename]
        other_table = self.db[other_tablename]
        for rfield in table._referenced_by:
            if rfield.table == other_table:
                return LazySet(rfield, self.id)
        raise AttributeError()
10667
class LazySet(object):
    """A Set built on demand from a reference field and a record id.

    Every operation constructs a fresh Set for the query
    ``table.field == id`` and delegates to it.
    """

    def __init__(self, field, id):
        self.db = field.db
        self.tablename = field._tablename
        self.fieldname = field.name
        self.id = id

    def _getset(self):
        # build the underlying Set lazily, each time it is needed
        field = self.db[self.tablename][self.fieldname]
        return Set(self.db, field == self.id)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
10705
class VirtualCommand(object):
    """Bind a lazy virtual-field method to a row, deferring its evaluation
    until the command is called."""

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        """Invoke the bound method with the row as first argument."""
        return self.method(self.row, *args, **kwargs)
10712
def lazy_virtualfield(f):
    """Decorator: flag `f` as a lazy virtual field so that
    Rows.setvirtualfields wraps it in a VirtualCommand instead of
    evaluating it eagerly."""
    setattr(f, '__lazy__', True)
    return f
10716
class Rows(object):

    """
    A wrapper for the return value of a select. It basically represents a table.
    It has an iterator and each row is represented as a dictionary.
    """

    # ## TODO: this class still needs some work to care for ID/OID

    def __init__(
        self,
        db=None,
        records=None,
        colnames=None,
        compact=True,
        rawrows=None
    ):
        # FIX: the original used mutable defaults (records=[], colnames=[]),
        # so all Rows() instances shared the same lists; None sentinels are
        # backward compatible for every caller.
        self.db = db
        self.records = [] if records is None else records
        self.colnames = [] if colnames is None else colnames
        self.compact = compact
        self.response = rawrows

    def __repr__(self):
        return '<Rows (%s)>' % len(self.records)

    def setvirtualfields(self, **keyed_virtualfields):
        """
        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @staticmethod)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename, virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields, attribute)
                        if hasattr(method, '__lazy__'):
                            # lazy fields become deferred callables
                            box[attribute] = VirtualCommand(method, row)
                        elif type(method) == types.MethodType:
                            if not updated:
                                # legacy style: copy row into the
                                # virtualfields instance once per row
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute] = method()
        return self

    def __and__(self, other):
        # concatenation of two compatible Rows objects
        if self.colnames != other.colnames:
            raise Exception('Cannot & incompatible Rows objects')
        records = self.records + other.records
        return Rows(self.db, records, self.colnames,
                    compact=self.compact or other.compact)

    def __or__(self, other):
        # union: records of other not already in self are appended
        if self.colnames != other.colnames:
            raise Exception('Cannot | incompatible Rows objects')
        records = [record for record in other.records
                   if not record in self.records]
        records = self.records + records
        return Rows(self.db, records, self.colnames,
                    compact=self.compact or other.compact)

    def __nonzero__(self):
        # Python 2 truth protocol: truthy when there are records
        if len(self.records):
            return 1
        return 0

    def __len__(self):
        return len(self.records)

    def __getslice__(self, a, b):
        # Python 2 slicing protocol
        return Rows(self.db, self.records[a:b], self.colnames, compact=self.compact)

    def __getitem__(self, i):
        row = self.records[i]
        keys = row.keys()
        # in compact mode a single-table row is unwrapped to its inner Row
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[row.keys()[0]]
        return row

    def __iter__(self):
        """
        iterator over records
        """

        for i in xrange(len(self)):
            yield self[i]

    def __str__(self):
        """
        serializes the table into a csv file
        """

        s = StringIO.StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()

    def first(self):
        # first record or None when empty
        if not self.records:
            return None
        return self[0]

    def last(self):
        # last record or None when empty
        if not self.records:
            return None
        return self[-1]

    def find(self, f, limitby=None):
        """
        returns a new Rows object, a subset of the original object,
        filtered by the function f
        """
        if not self:
            return Rows(self.db, [], self.colnames, compact=self.compact)
        records = []
        if limitby:
            a, b = limitby
        else:
            a, b = 0, len(self)
        k = 0
        for i, row in enumerate(self):
            if f(row):
                # k counts matches; only those inside [a, b) are kept
                if a <= k: records.append(self.records[i])
                k += 1
                if k == b: break
        return Rows(self.db, records, self.colnames, compact=self.compact)

    def exclude(self, f):
        """
        removes elements from the calling Rows object, filtered by the function f,
        and returns a new Rows object containing the removed elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames, compact=self.compact)
        removed = []
        i = 0
        while i < len(self):
            row = self[i]
            if f(row):
                # mutate self.records in place; removed rows are collected
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames, compact=self.compact)

    def sort(self, f, reverse=False):
        """
        returns a list of sorted elements (not sorted in place)
        """
        rows = Rows(self.db, [], self.colnames, compact=self.compact)
        # When compact=True, iterating over self modifies each record,
        # so when sorting self, it is necessary to return a sorted
        # version of self.records rather than the sorted self directly.
        rows.records = [r for (r, s) in sorted(zip(self.records, self),
                                               key=lambda r: f(r[1]),
                                               reverse=reverse)]
        return rows

    def group_by_value(self, *fields, **args):
        """
        regroups the rows, by one of the fields
        """
        one_result = False
        if 'one_result' in args:
            one_result = args['one_result']

        def build_fields_struct(row, fields, num, groups):
            ''' helper function: recursively nest rows under the values
            of fields[num:], mutating `groups` in place
            '''
            if num > len(fields) - 1:
                if one_result:
                    return row
                else:
                    return [row]

            key = fields[num]
            value = row[key]

            if value not in groups:
                groups[value] = build_fields_struct(row, fields, num + 1, {})
            else:
                struct = build_fields_struct(row, fields, num + 1, groups[value])

                # still have more grouping to do
                if type(struct) == type(dict()):
                    groups[value].update()
                # no more grouping, first only is off
                elif type(struct) == type(list()):
                    groups[value] += struct
                # no more grouping, first only on
                else:
                    groups[value] = struct

            return groups

        if len(fields) == 0:
            return self

        # if select returned no results
        if not self.records:
            return {}

        grouped_row_group = dict()

        # build the struct
        for row in self:
            build_fields_struct(row, fields, 0, grouped_row_group)

        return grouped_row_group

    def render(self, i=None, fields=None):
        """
        Takes an index and returns a copy of the indexed row with values
        transformed via the "represent" attributes of the associated fields.

        If no index is specified, a generator is returned for iteration
        over all the rows.

        fields -- a list of fields to transform (if None, all fields with
                  "represent" attributes will be transformed).
        """

        if i is None:
            return (self.render(i, fields=fields) for i in range(len(self)))
        import sqlhtml
        row = copy.deepcopy(self.records[i])
        keys = row.keys()
        tables = [f.tablename for f in fields] if fields \
            else [k for k in keys if k != '_extra']
        for table in tables:
            repr_fields = [f.name for f in fields if f.tablename == table] \
                if fields else [k for k in row[table].keys()
                                if (hasattr(self.db[table], k) and
                                    isinstance(self.db[table][k], Field)
                                    and self.db[table][k].represent)]
            for field in repr_fields:
                row[table][field] = sqlhtml.represent(
                    self.db[table][field], row[table][field], row[table])
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[keys[0]]
        return row

    def as_list(self,
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        returns the data as a list or dictionary.
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default False)
        """
        # temporarily switch compact mode while iterating
        (oc, self.compact) = (self.compact, compact)
        if storage_to_dict:
            items = [item.as_dict(datetime_to_str, custom_types) for item in self]
        else:
            items = [item for item in self]
        self.compact = compact
        return items

    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id
        :param compact: ? (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default False)
        """

        # test for multiple rows
        multi = False
        f = self.first()
        if f and isinstance(key, basestring):
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if (not "." in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                key = lambda r: key_generator.next()

        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        if isinstance(key, str) and key.count('.') == 1:
            (table, field) = key.split('.')
            return dict([(r[table][field], r) for r in rows])
        elif isinstance(key, str):
            return dict([(r[key], r) for r in rows])
        else:
            return dict([(key(r), r) for r in rows])

    def as_trees(self, parent_name='parent_id', children_name='children'):
        # build a forest from flat rows linked by parent_name;
        # assumes every non-None parent id is present in this Rows
        roots = []
        drows = {}
        for row in self:
            drows[row.id] = row
            row[children_name] = []
        for row in self:
            parent = row[parent_name]
            if parent is None:
                roots.append(row)
            else:
                drows[parent][children_name].append(row)
        return roots

    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
                         This will only work when exporting rows objects!!!!
                         DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)

        def unquote_colnames(colnames):
            # strip adapter quoting: '"t"."f"' -> 't.f'
            unq_colnames = []
            for col in colnames:
                m = self.db._adapter.REGEX_TABLE_DOT_FIELD.match(col)
                if not m:
                    unq_colnames.append(col)
                else:
                    unq_colnames.append('.'.join(m.groups()))
            return unq_colnames

        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames', True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(unquote_colnames(colnames))

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value, Reference):
                return long(value)
            elif hasattr(value, 'isoformat'):
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list, tuple)):  # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                m = self.db._adapter.REGEX_TABLE_DOT_FIELD.match(col)
                if not m:
                    # computed/extra column
                    row.append(record._extra[col])
                else:
                    (t, f) = m.groups()
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row, dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type == 'blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value, record)
                    row.append(none_exception(value))
            writer.writerow(row)

    def xml(self, strict=False, row_name='row', rows_name='rows'):
        """
        serializes the table using sqlhtml.SQLTABLE (if present)
        """

        if strict:
            ncols = len(self.colnames)
            return '<%s>\n%s\n</%s>' % (rows_name,
                                        '\n'.join(row.as_xml(row_name=row_name,
                                                             colnames=self.colnames) for
                                                  row in self), rows_name)

        import sqlhtml
        return sqlhtml.SQLTABLE(self).xml()

    def as_xml(self, row_name='row', rows_name='rows'):
        return self.xml(strict=True, row_name=row_name, rows_name=rows_name)

    def as_json(self, mode='object', default=None):
        """
        serializes the rows to a JSON list or object with objects
        mode='object' is not implemented (should return a nested
        object structure)
        """

        items = [record.as_json(mode=mode, default=default,
                                serialize=False,
                                colnames=self.colnames) for
                 record in self]

        if have_serializers:
            return serializers.json(items,
                                    default=default or
                                    serializers.custom_json)
        elif simplejson:
            return simplejson.dumps(items)
        else:
            raise RuntimeError("missing simplejson")

    # for consistent naming yet backwards compatible
    as_csv = __str__
    json = as_json

################################################################################
# dummy function used to define some doctests
################################################################################

# NOTE: test_all exists only for its docstring; the doctests below exercise
# the DAL against the adapter URI given on the command line (or a local
# SQLite database) when this module is run directly via doctest.testmod().
# The docstring is executable behavior and is therefore left unchanged.
def test_all():
    """

    >>> if len(sys.argv)<2: db = DAL("sqlite://test.db")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
        Field('stringf', 'string', length=32, required=True),\
        Field('booleanf', 'boolean', default=False),\
        Field('passwordf', 'password', notnull=True),\
        Field('uploadf', 'upload'),\
        Field('blobf', 'blob'),\
        Field('integerf', 'integer', unique=True),\
        Field('doublef', 'double', unique=True,notnull=True),\
        Field('jsonf', 'json'),\
        Field('datef', 'date', default=datetime.date.today()),\
        Field('timef', 'time'),\
        Field('datetimef', 'datetime'),\
        migrate='test_user.table')

    Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
        uploadf=None, integerf=5, doublef=3.14,\
        jsonf={"j": True},\
        datef=datetime.date(2001, 1, 1),\
        timef=datetime.time(12, 30, 15),\
        datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
        Field('name'),\
        Field('birth','date'),\
        migrate='test_person.table')
    >>> person_id = db.person.insert(name='Marco',birth='2005-06-22')
    >>> person_id = db.person.insert(name='Massimo',birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name="Max")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
        Field('name'),\
        Field('birth','date'),\
        Field('owner',db.person),\
        migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
        migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
        migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
        Field('author_id', db.author),\
        Field('paper_id', db.paper),\
        migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
11376 ################################################################################ 11377 # deprecated since the new DAL; here only for backward compatibility 11378 ################################################################################ 11379 11380 SQLField = Field 11381 SQLTable = Table 11382 SQLXorable = Expression 11383 SQLQuery = Query 11384 SQLSet = Set 11385 SQLRows = Rows 11386 SQLStorage = Row 11387 SQLDB = DAL 11388 GQLDB = DAL 11389 DAL.Field = Field # was necessary in gluon/globals.py session.connect 11390 DAL.Table = Table # was necessary in gluon/globals.py session.connect

################################################################################
# Geodal utils
################################################################################

def geoPoint(x, y):
    """
    Return a WKT (well-known text) POINT string for coordinates (x, y).

    Example: geoPoint(1, 2) -> 'POINT (1.000000 2.000000)'
    """
    coordinates = (x, y)
    return "POINT (%f %f)" % coordinates
11398
def geoLine(*line):
    """
    Return a WKT LINESTRING string for a sequence of (x, y) coordinate pairs.

    Each positional argument is a 2-tuple of numbers.
    Example: geoLine((0, 0), (1, 1)) ->
    'LINESTRING (0.000000 0.000000,1.000000 1.000000)'
    """
    vertices = ["%f %f" % pair for pair in line]
    return "LINESTRING (%s)" % ','.join(vertices)
11401
def geoPolygon(*line):
    """
    Return a WKT POLYGON string (single ring) for a sequence of (x, y) pairs.

    Each positional argument is a 2-tuple of numbers; the caller is
    responsible for closing the ring if required.
    """
    vertices = []
    for pair in line:
        vertices.append("%f %f" % pair)
    return "POLYGON ((%s))" % ','.join(vertices)
11404 11405 ################################################################################ 11406 # run tests 11407 ################################################################################ 11408 11409 if __name__ == '__main__': 11410 import doctest 11411 doctest.testmod() 11412